Merge "Add defects mapping for reprod bugs function"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 9ceea67..0434b19 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -75,7 +75,8 @@
}
// main
-throttle(['fuel_devops_environment']) {
+// Temporarily disable throttle to check how it runs
+//throttle(['fuel_devops_environment']) {
node ("${NODE_NAME}") {
try {
// run deploy stages
@@ -106,4 +107,4 @@
}
}
-}
\ No newline at end of file
+//}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 36ea29a..998d55b 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -19,6 +19,10 @@
* COOKIECUTTER_TEMPLATE_COMMIT Commit/tag/branch for cookiecutter-templates repository. If empty, then takes ${MCP_VERSION} value
* SALT_MODELS_SYSTEM_COMMIT Commit/tag/branch for reclass-system repository. If empty, then takes ${MCP_VERSION} value
* SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
+ * MCP_SALT_REPO_URL Base URL for the MCP repositories required to bootstrap the cfg01 node. Leave blank to use the default
+ * (http://mirror.mirantis.com/ from mcp-common-scripts)
+ * MCP_SALT_REPO_KEY URL of the key file. Leave blank to use the default
+ * (${MCP_SALT_REPO_URL}/${MCP_VERSION}/salt-formulas/xenial/archive-salt-formulas.key from mcp-common-scripts)
*
*/
@@ -108,7 +112,7 @@
} catch (e) {
common.printMsg("Saltstack cluster deploy is failed", "purple")
if (fileExists(xml_report_name)) {
- shared.download_logs("deploy_salt")
+ shared.download_logs("deploy_salt_${ENV_NAME}")
def String junit_report_xml = readFile(xml_report_name)
def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
throw new Exception(junit_report_xml_pretty)
diff --git a/jobs/pipelines/swarm-create-cfg-config-drive.groovy b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
index 72d278f..46d5010 100644
--- a/jobs/pipelines/swarm-create-cfg-config-drive.groovy
+++ b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
@@ -152,7 +152,10 @@
stage("Set data"){
for (i in entries(smc)) {
- sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" ${user_data_script_path}"
+ // Replace only if the variable is non-empty; otherwise keep the default
+ if (i[1]) {
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" ${user_data_script_path}"
+ }
}
}
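The guard above rewrites an "export VAR=..." line only when a non-empty value is supplied, so defaults baked into the user-data script survive blank job parameters. A minimal Python sketch of the same idea (names are illustrative; the pipeline itself does this with sed):

    import re

    def apply_overrides(user_data, overrides):
        # Rewrite "export KEY=..." only for non-empty overrides,
        # leaving the template defaults in place for blank values.
        for key, value in overrides.items():
            if value:
                user_data = re.sub(r"export {}=.*".format(re.escape(key)),
                                   "export {}={}".format(key, value),
                                   user_data)
        return user_data

    script = "export MCP_SALT_REPO_URL=http://mirror.mirantis.com/\n"
    # Blank override: the default above is kept unchanged
    print(apply_overrides(script, {"MCP_SALT_REPO_URL": ""}))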
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 58474b9..1939b4d 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -62,7 +62,7 @@
} catch (e) {
common.printMsg("Job is failed", "purple")
- shared.download_logs("deploy_drivetrain")
+ shared.download_logs("deploy_drivetrain_${ENV_NAME}")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index c854c73..cb26aae 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -62,7 +62,7 @@
} catch (e) {
common.printMsg("Job is failed", "purple")
- shared.download_logs("deploy_platform")
+ shared.download_logs("deploy_platform_${ENV_NAME}")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 780229d..99231f9 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -17,6 +17,7 @@
* MCP_IMAGE_PATH1604 Not used (backward compatibility, for manual deployment steps only)
* IMAGE_PATH_CFG01_DAY01 Not used (backward compatibility, for manual deployment steps only)
* TEMPEST_IMAGE_VERSION Tempest image version: pike by default, can be queens.
+ * TEMPEST_TARGET Node where tempest will be run
*/
@Library('tcp-qa')_
@@ -56,6 +57,7 @@
if (steps.contains('openstack')) {
sources += """
export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
+ export TEMPEST_TARGET=${TEMPEST_TARGET}
# TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
}
def installed = steps.collect {"""\
@@ -74,7 +76,7 @@
""")
def snapshot_name = "test_completed"
- shared.download_logs("test_completed")
+ shared.download_logs("test_completed_${ENV_NAME}")
shared.run_cmd("""\
dos.py suspend ${ENV_NAME}
dos.py snapshot ${ENV_NAME} ${snapshot_name}
@@ -91,7 +93,7 @@
common.printMsg("Job is failed", "purple")
// Downloading logs is usually not needed here
// because tests should use the decorator @pytest.mark.grab_versions
- // shared.download_logs("test_failed")
+ // shared.download_logs("test_failed_${ENV_NAME}")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 92b43b2..32ac0e1 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -159,6 +159,12 @@
def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
+ def mcp_salt_repo_url = env.MCP_SALT_REPO_URL ?: ''
+ def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
+ def env_ipmi_user = env.IPMI_USER ?: ''
+ def env_ipmi_pass = env.IPMI_PASS ?: ''
+ def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
+ def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
def parameters = [
string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
string(name: 'PARENT_WORKSPACE', value: pwd()),
@@ -175,6 +181,12 @@
string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${salt_models_system_commit}"),
string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
+ string(name: 'MCP_SALT_REPO_URL', value: "${mcp_salt_repo_url}"),
+ string(name: 'MCP_SALT_REPO_KEY', value: "${mcp_salt_repo_key}"),
+ string(name: 'IPMI_USER', value: env_ipmi_user),
+ string(name: 'IPMI_PASS', value: env_ipmi_pass),
+ string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
+ string(name: 'LAB_CONTROL_IFACE', value: env_lab_ctl_iface),
booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
]
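Each "env.X ?: ''" above forwards an unset job parameter as an empty string, so the downstream job (or the sed guard in swarm-create-cfg-config-drive.groovy) falls back to its own default. The Python counterpart of this defaulting, as tcp_tests/settings.py already does for other variables:

    import os

    # An empty string means "not provided"; consumers treat it as
    # "keep the built-in default".
    MCP_SALT_REPO_URL = os.environ.get('MCP_SALT_REPO_URL', '')
    MCP_SALT_REPO_KEY = os.environ.get('MCP_SALT_REPO_KEY', '')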
@@ -218,6 +230,7 @@
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def tempest_image_version = env.TEMPEST_IMAGE_VERSION ?: 'pike'
+ def tempest_target = env.TEMPEST_TARGET ?: 'gtw01'
def parameters = [
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'PASSED_STEPS', value: passed_steps),
@@ -231,6 +244,7 @@
string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
string(name: 'TEMPEST_IMAGE_VERSION', value: "${tempest_image_version}"),
+ string(name: 'TEMPEST_TARGET', value: "${tempest_target}"),
]
common.printMsg("Start building job 'swarm-run-pytest' with parameters:", "purple")
@@ -246,6 +260,7 @@
def tempest_test_suite_name = env.TEMPEST_TEST_SUITE_NAME
def parameters = [
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+ string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
string(name: 'PASSED_STEPS', value: passed_steps),
string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
@@ -291,6 +306,9 @@
string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
+ string(name: 'IPMI_USER', value: env.IPMI_USER),
+ string(name: 'IPMI_PASS', value: env.IPMI_PASS),
+ string(name: 'IMAGE_PATH_CFG01_DAY01', value: env.IMAGE_PATH_CFG01_DAY01),
]
build_shell_job('swarm-cookied-model-generator', parameters, "deploy_generate_model.xml")
@@ -317,6 +335,8 @@
def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def mcp_salt_repo_url = env.MCP_SALT_REPO_URL ?: ''
+ def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
def parameters = [
string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
@@ -331,8 +351,8 @@
string(name: 'DEPLOY_NETWORK_GW', value: "${ADMIN_NETWORK_GW}"),
string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
- string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
- string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
+ string(name: 'MCP_SALT_REPO_URL', value: "${mcp_salt_repo_url}"),
+ string(name: 'MCP_SALT_REPO_KEY', value: "${mcp_salt_repo_key}"),
string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
@@ -508,7 +528,7 @@
def upload_results_to_testrail(report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options=[]) {
def venvPath = '/home/jenkins/venv_testrail_reporter'
- def testPlanDesc = env.ENV_NAME
+ def testPlanDesc = env.LAB_CONFIG_NAME
def testrailURL = "https://mirantis.testrail.com"
def testrailProject = "Mirantis Cloud Platform"
def testPlanName = "[MCP-Q2]System-${MCP_VERSION}-${new Date().format('yyyy-MM-dd')}"
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index 409034e..8cf5bc0 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -171,6 +171,30 @@
@pytest.fixture(scope='function')
+def conformance_helper(request, func_name, k8s_actions):
+ prepare_log = request.keywords.get('prepare_log', None)
+ merge_xunit = request.keywords.get('merge_xunit', None)
+ download_target = request.keywords.get('download', None)
+
+ def test_fin():
+ if hasattr(request.node, 'rep_call') and \
+ (request.node.rep_call.passed or request.node.rep_call.failed)\
+ and download_target:
+ files = utils.extract_name_from_mark(download_target) \
+ or "{}".format(func_name)
+ logfile = utils.extract_name_from_mark(prepare_log, 'filepath')
+ if prepare_log:
+ k8s_actions.move_file_to_root_folder(logfile)
+ if merge_xunit:
+ path = utils.extract_name_from_mark(merge_xunit, 'path')
+ output = utils.extract_name_from_mark(merge_xunit, 'output')
+ k8s_actions.combine_xunit(path, output)
+ k8s_actions.download_k8s_logs(files)
+
+ request.addfinalizer(test_fin)
+
+
+@pytest.fixture(scope='function')
def k8s_cncf_log_helper(request, func_name, underlay, k8s_deployed):
"""Finalizer to prepare cncf tar.gz and save results from archive"""
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
index 3e1a45b..53342ae 100644
--- a/tcp_tests/fixtures/runtest_fixtures.py
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -26,13 +26,11 @@
tempest_pattern = settings.TEMPEST_PATTERN
cluster_name = settings.LAB_CONFIG_NAME
domain_name = settings.DOMAIN_NAME
- target = settings.TEMPEST_TARGET
runtest = RuntestManager(
config,
underlay_actions, salt_actions,
cluster_name=cluster_name,
domain_name=domain_name,
tempest_threads=tempest_threads,
- tempest_pattern=tempest_pattern,
- target=target)
+ tempest_pattern=tempest_pattern)
return runtest
diff --git a/tcp_tests/managers/k8s/pods.py b/tcp_tests/managers/k8s/pods.py
index 98192a6..4426170 100644
--- a/tcp_tests/managers/k8s/pods.py
+++ b/tcp_tests/managers/k8s/pods.py
@@ -53,7 +53,7 @@
'"{1}"'.format(self.name, phases))
return self
- def wait_running(self, timeout=240, interval=3):
+ def wait_running(self, timeout=600, interval=3):
return self.wait_phase('Running', timeout=timeout, interval=interval)
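Raising the default from 240 to 600 seconds gives slow-starting conformance pods more headroom; wait_running is a thin wrapper over a phase poll. A generic sketch of such a poll loop (independent of the project's helpers module):

    import time

    def wait_phase(read_phase, phases, timeout=600, interval=3):
        # Poll read_phase() until it returns one of `phases`,
        # or give up after `timeout` seconds.
        deadline = time.time() + timeout
        while time.time() < deadline:
            phase = read_phase()
            if phase in phases:
                return phase
            time.sleep(interval)
        raise RuntimeError("phase not in {} after {}s".format(phases, timeout))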
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 79974d3..d1a9a87 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -41,6 +41,7 @@
self._api = None
self.kubectl = K8SKubectlCli(self)
self.virtlet = K8SVirtlet(self)
+ self.conformance_node = None
super(K8SManager, self).__init__(config=config, underlay=underlay)
def install(self, commands):
@@ -160,7 +161,7 @@
:return: str, IP address
"""
ctl_vip_pillar = self._salt.get_pillar(
- tgt="I@kubernetes:control:enabled:True",
+ tgt="I@kubernetes:control",
pillar="_param:cluster_vip_address")[0]
return ctl_vip_pillar.values()[0]
@@ -299,6 +300,80 @@
timeout_msg="Timeout for CNCF reached."
)
+ def determine_conformance_node(self, target):
+ masters_fqdn = self._salt.get_pillar(
+ tgt='I@kubernetes:master', pillar='linux:network:fqdn')
+ node_names = [v for pillar in masters_fqdn for
+ k, v in pillar.items()]
+ return [node_name for node_name
+ in node_names
+ if node_name.startswith(target)][0]
+
+ def start_conformance_inside_pod(self, cnf_type='k8s', timeout=60 * 60):
+ """
+ Create a conformance pod and wait for results
+ :param cnf_type: 'k8s' or 'virtlet', selects which conformance suite to run
+ :param timeout: seconds to wait for the conformance pod to finish
+ :return: None; raises RuntimeError if the pod finishes in 'Failed'
+ """
+ if cnf_type == 'k8s':
+ pod_mark = 'conformance'
+ elif cnf_type == 'virtlet':
+ pod_mark = 'virtlet-conformance'
+ else:
+ LOG.error("Unknown conformance type or it even not set")
+ raise RuntimeError("Unknown conformance type")
+ conformance_cmd = "kubectl apply -f /srv/kubernetes/{}.yml" \
+ "".format(pod_mark)
+ self.controller_check_call(conformance_cmd, timeout=900)
+
+ cnf_pod = self.api.pods.get(pod_mark, pod_mark)
+ cnf_pod.wait_running()
+
+ pod = cnf_pod.read()
+ target = "{}.".format(pod.spec.node_name)
+
+ self.conformance_node = self.determine_conformance_node(target)
+
+ def cnf_status():
+ pod = cnf_pod.read()
+ status = pod.status.phase
+ LOG.info("Conformance status: {}".format(status))
+ return status
+
+ LOG.info("Waiting for Conformance to complete")
+ helpers.wait(
+ lambda: cnf_status() in ('Succeeded', 'Failed'),
+ interval=120, timeout=timeout,
+ timeout_msg="Timeout for Conformance reached."
+ )
+
+ pod = cnf_pod.read()
+ status = pod.status.phase
+ if status == 'Failed':
+ describe = "kubectl describe po {0} -n {0}".format(pod_mark)
+ LOG.info(self.controller_check_call(describe, timeout=30))
+ raise RuntimeError("Conformance failed")
+
+ def get_node_name(self, tgt):
+ res = [node_name for node_name in
+ self.__underlay.node_names() if tgt in node_name]
+ assert len(res) > 0, 'Cannot find node name by tgt {}'.format(tgt)
+ return res[0]
+
+ def move_file_to_root_folder(self, filepath):
+ cmd = "mv {0} /root/".format(filepath)
+ if self.conformance_node:
+ short_name = self.conformance_node.split('.')[0]
+ LOG.info("Managing results on {}".format(self.conformance_node))
+ step = {'cmd': cmd, 'node_name': self.get_node_name(
+ tgt=short_name)}
+ LOG.info('Move {0} to /root/ on {1}'.format(
+ filepath, self.conformance_node))
+ self.execute_command(step, 'Move files')
+ else:
+ LOG.info("Node is not properly set")
+
def extract_file_to_node(self, system='docker',
container='virtlet',
file_path='report.xml',
@@ -315,7 +390,7 @@
:return:
"""
with self.__underlay.remote(node_name=self.controller_name) as remote:
- if system is 'docker':
+ if system == 'docker':
cmd = ("docker ps --all | grep \"{0}\" |"
" awk '{{print $1}}'".format(container))
result = remote.check_call(cmd, raise_on_err=False)
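The "is" to "==" fix above is more than style: "is" compares object identity, and whether two equal strings share identity is an interpreter detail, so "system is 'docker'" can silently be False for an equal string. For example:

    a = 'docker'
    b = ''.join(['dock', 'er'])
    print(a == b)  # True: equal values
    print(a is b)  # False here: two distinct string objects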
@@ -343,12 +418,17 @@
:param files:
:return:
"""
+ if self.conformance_node:
+ node = self.conformance_node
+ else:
+ node = self.controller_name
+ LOG.info("Trying to get logs at {}".format(node))
master_host = self.__config.salt.salt_master_host
with self.__underlay.remote(host=master_host) as r:
for log_file in files:
- cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
- self.controller_name, log_file)
- r.check_call(cmd, raise_on_err=False)
+ cmd = "scp -r \"{0}:/root/{1}\" /root/".format(
+ node, log_file)
+ r.check_call(cmd, raise_on_err=True)
LOG.info("Downloading the artifact {0}".format(log_file))
r.download(destination=log_file, target=os.getcwd())
self.store_server_version(os.path.join(os.getcwd(), 'env_k8s_version'))
@@ -362,18 +442,22 @@
:param output: Path to xml file where output will stored
:return:
"""
- with self.__underlay.remote(node_name=self.controller_name) as r:
- cmd = ("apt-get install python-setuptools -y; "
- "pip install "
- "https://github.com/mogaika/xunitmerge/archive/master.zip")
- LOG.debug('Installing xunitmerge')
- r.check_call(cmd, raise_on_err=False)
- LOG.debug('Merging xunit')
- cmd = ("cd {0}; arg=''; "
- "for i in $(ls | grep xml); "
- "do arg=\"$arg $i\"; done && "
- "xunitmerge $arg {1}".format(path, output))
- r.check_call(cmd, raise_on_err=False)
+ if self.conformance_node:
+ node = self.conformance_node
+ else:
+ node = self.controller_name
+ LOG.info("Trying to combine xunit at {}".format(node))
+ cmd = ("apt-get install python-setuptools -y; "
+ "pip install "
+ "https://github.com/mogaika/xunitmerge/archive/master.zip")
+ LOG.debug('Installing xunitmerge')
+ self._salt.cmd_run(tgt=node, cmd=cmd)
+ LOG.debug('Merging xunit')
+ cmd = ("cd {0}; arg=''; "
+ "for i in $(ls | grep xml); "
+ "do arg=\"$arg $i\"; done && "
+ "xunitmerge $arg {1} || true".format(path, output))
+ self._salt.cmd_run(tgt=node, cmd=cmd)
def manage_cncf_archive(self):
"""
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 6c1e874..a45663b 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -33,15 +33,13 @@
image_version = settings.TEMPEST_IMAGE_VERSION
container_name = 'run-tempest-ci'
master_host = "cfg01"
- control_host = "ctl01"
- compute_host = "cmp"
class_name = "runtest"
run_cmd = '/bin/bash -c "run-tempest"'
def __init__(self, config, underlay, salt_api, cluster_name,
domain_name, tempest_threads,
tempest_pattern=settings.TEMPEST_PATTERN,
- run_cmd=None, target='gtw01'):
+ run_cmd=None):
self.__config = config
self.underlay = underlay
self.__salt_api = salt_api
@@ -50,100 +48,25 @@
self.tempest_threads = tempest_threads
self.tempest_pattern = tempest_pattern
self.run_cmd = run_cmd or self.run_cmd
- self.target_name = self.underlay.get_target_node_names(target)[0]
self.master_name = self.underlay.get_target_node_names(
self.master_host)[0]
- self.control_name = self.underlay.get_target_node_names(
- self.control_host)[0]
- self.compute_name = self.underlay.get_target_node_names(
- self.compute_host)[0]
- self.barbican = False
+ self.__target_name = None
@property
def salt_api(self):
return self.__salt_api
@property
- def runtest_pillar(self):
- public_net = self.__config.underlay.dhcp_ranges[
- settings.EXTERNAL_ADDRESS_POOL_NAME]
- public_gateway = public_net["gateway"].encode("ascii")
- public_cidr = public_net["cidr"].encode("ascii")
- public_allocation_start = public_net["start"].encode("ascii")
- public_allocation_end = public_net["end"].encode("ascii")
- tempest_test_target = self.target_name.encode("ascii") + "*"
-
- pillar = {
- 'classes': ['service.runtest.tempest',
- 'service.runtest.tempest.public_net',
- 'service.runtest.tempest.services.manila.glance'],
- 'parameters': {
- '_param': {
- 'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
- 'runtest_tempest_cfg_name': 'tempest.conf',
- 'runtest_tempest_public_net': 'public',
- 'openstack_public_neutron_subnet_gateway': public_gateway,
- 'openstack_public_neutron_subnet_cidr': public_cidr,
- 'openstack_public_neutron_subnet_allocation_start':
- public_allocation_start,
- 'openstack_public_neutron_subnet_allocation_end':
- public_allocation_end,
- 'tempest_test_target': tempest_test_target,
- 'glance_image_cirros_location':
- 'http://cz8133.bud.mirantis.net:8099'
- '/cirros-0.3.5-x86_64-disk.img',
- 'glance_image_fedora_location':
- 'http://cz8133.bud.mirantis.net:8099'
- '/Fedora-Cloud-Base-27-1.6.x86_64.qcow2',
- 'glance_image_manila_location':
- 'http://cz8133.bud.mirantis.net:8099'
- '/manila-service-image-master.qcow2',
- },
- 'neutron': {
- 'client': {
- 'enabled': True
- }
- },
- 'runtest': {
- 'enabled': True,
- 'keystonerc_node': 'ctl01*',
- 'tempest': {
- 'enabled': True,
- 'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
- 'cfg_name': '${_param:runtest_tempest_cfg_name}',
- 'put_keystone_rc_enabled': True,
- 'put_local_image_file_enabled': False,
- 'DEFAULT': {
- 'log_file': 'tempest.log'
- },
- 'compute': {
- 'min_compute_nodes': 2,
- },
- 'convert_to_uuid': {
- 'network': {
- 'public_network_id':
- '${_param:runtest_tempest_public_net}'
- }
- },
- 'heat_plugin': {
- 'build_timeout': '600'
- },
- 'share': {
- 'capability_snapshot_support': True,
- 'run_driver_assisted_migration_tests': False,
- 'run_manage_unmanage_snapshot_tests': False,
- 'run_manage_unmanage_tests': False,
- 'run_migration_with_preserve_snapshots_tests':
- False,
- 'run_quota_tests': True,
- 'run_replication_tests': False,
- 'run_snapshot_tests': True,
- }}}}}
-
- if self.barbican:
- pillar['classes'].append('service.runtest.tempest.barbican')
-
- return pillar
+ def target_name(self):
+ if not self.__target_name:
+ target_host = self.__salt_api.get_single_pillar(
+ tgt=self.master_name,
+ pillar="runtest:tempest:test_target")
+ if target_host[-1] == "*":
+ target_host = target_host[:-1]
+ self.__target_name = self.underlay.get_target_node_names(
+ target_host)[0]
+ return self.__target_name
def fetch_arficats(self, username=None, file_format='xml'):
with self.underlay.remote(node_name=self.target_name,
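target_name changes from a constructor argument to a lazily resolved, cached property driven by the runtest:tempest:test_target pillar. The caching shape is the usual memoized property, sketched here with a stub resolver standing in for the pillar lookup:

    class RuntestManagerSketch(object):
        def __init__(self, resolver):
            self.__target_name = None
            self._resolver = resolver  # stands in for get_single_pillar

        @property
        def target_name(self):
            # Resolve once on first access, strip a trailing salt
            # glob '*', then serve the cached value.
            if not self.__target_name:
                host = self._resolver()
                if host.endswith('*'):
                    host = host[:-1]
                self.__target_name = host
            return self.__target_name

    mgr = RuntestManagerSketch(lambda: 'ctl01*')
    print(mgr.target_name)  # ctl01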
@@ -159,25 +82,6 @@
destination=report, # noqa
target=os.getcwd())
- def store_runtest_model(self, runtest_pillar=None):
- with self.underlay.yaml_editor(
- file_path="/srv/salt/reclass/classes/cluster/"
- "{cluster_name}/infra/"
- "{class_name}.yml".format(
- cluster_name=self.cluster_name,
- class_name=self.class_name),
- node_name=self.master_name) as editor:
- editor.content = runtest_pillar or self.runtest_pillar
- with self.underlay.yaml_editor(
- file_path="/srv/salt/reclass/nodes/_generated/"
- "cfg01.{domain_name}.yml".format(
- domain_name=self.domain_name),
- node_name=self.master_name) as editor:
- editor.content['classes'].append(
- 'cluster.{cluster_name}.infra.{class_name}'.format(
- cluster_name=self.cluster_name,
- class_name=self.class_name))
-
def save_runtime_logs(self, logs=None, inspect=None):
if logs:
with open("{path}/{target}_tempest_run.log".format(
@@ -198,33 +102,13 @@
f.write(container_inspect)
def prepare(self):
- barbican_pillar = "nova:controller:barbican:enabled"
- result = self.__salt_api.get_pillar(tgt=self.control_name,
- pillar=barbican_pillar)
- self.barbican = result[0].get(self.control_name, False)
- self.store_runtest_model()
- cirros_pillar = ("salt-call --out=newline_values_only "
- "pillar.get "
- "glance:client:identity:"
- "admin_identity:image:cirros:location")
- dpdk_pillar = "linux:network:dpdk:enabled"
- salt_cmd = "salt -l info --hard-crash --state-output=mixed "
salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "
-
- result = self.__salt_api.get_pillar(tgt=self.compute_name,
- pillar=dpdk_pillar)
-
- dpdk = result[0].get(self.compute_name, False)
- LOG.info("DPDK enabled: {}".format(bool(dpdk)))
-
+ barbican_integration = self.__salt_api.get_single_pillar(
+ tgt="I@barbican:client and ctl*",
+ pillar="_param:barbican_integration_enabled")
+ LOG.info("barbican_integration: {}".format(barbican_integration))
commands = [
{
- 'description': "Sync salt objects for runtest model",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_cmd + "'*' saltutil.refresh_pillar && " +
- salt_cmd + "'*' saltutil.sync_all")},
- {
'description': ("Install docker-ce package and "
"enable packets forwarding"),
'node_name': self.target_name,
@@ -238,59 +122,14 @@
salt_call_cmd + " pip.install setuptools && " +
salt_call_cmd + " pip.install docker")},
{
- 'description': "Run salt.minion state for runtest formula",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls salt.minion && "
- " sleep 20")},
- {
- 'description': "Enforce keystone state for neutronv2",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls keystone.client")},
- {
- 'description': "Create networks for Tempest tests",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls neutron.client")},
- {
- 'description': "Create flavors for Tempest tests",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls nova.client")},
- {
- 'description': "Upload images for Tempest",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls glance.client")},
- {
'description': "Generate config for Tempest",
'node_name': self.master_name,
'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls runtest")},
- {
- 'description': "Upload cirros image",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;"
- "cirros_url=$({}) && {} '{}' cmd.run "
- "\"wget $cirros_url -O /tmp/TestCirros-0.3.5.img\""
- .format(cirros_pillar, salt_cmd, self.target_name))},
+ "salt-run state.orchestrate " +
+ "runtest.orchestrate.tempest")},
]
- if dpdk:
- commands.append({
- 'description': "Configure flavor for DPDK",
- 'node_name': self.control_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " cmd.run "
- " '. /root/keystonercv3;"
- " openstack flavor set m1.extra_tiny_test"
- " --property hw:mem_page_size=any;"
- " openstack flavor set m1.tiny_test"
- " --property hw:mem_page_size=any'")},
- )
-
- if self.barbican:
+ if barbican_integration == 'True':
commands.append({
'description': "Configure barbican",
'node_name': self.master_name,
@@ -312,6 +151,7 @@
docker_args = (
" -t "
+ " --net host "
" --name {container_name} "
" -e ARGS=\"-r {tempest_pattern} -w {tempest_threads}\""
" -v {cfg_dir}/tempest.conf:/etc/tempest/tempest.conf"
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index a468b02..1e1640f 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -167,6 +167,24 @@
result = self.local(tgt=tgt, fun='pillar.get', args=pillar)
return result['return']
+ def get_single_pillar(self, tgt, pillar):
+ """Get a scalar value from a single node
+
+ :return: pillar value
+ """
+
+ result = self.get_pillar(tgt, pillar)
+ nodes = result[0].keys()
+
+ if not nodes:
+ raise LookupError("No minions selected "
+ "for the target '{0}'".format(tgt))
+ if len(nodes) > 1:
+ raise LookupError("Too many minions selected "
+ "for the target '{0}' , expected one: {1}"
+ .format(tgt, nodes))
+ return result[0][nodes[0]]
+
def get_grains(self, tgt, grains):
result = self.local(tgt=tgt, fun='grains.get', args=grains)
return result['return']
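get_single_pillar covers the common "one minion, one value" case of get_pillar and fails loudly on zero or multiple matches. The unwrap logic it encapsulates, exercised against a fake payload shaped like salt's pillar.get return:

    def get_single_pillar(result, tgt):
        # result[0] maps minion id -> pillar value
        nodes = list(result[0].keys())
        if not nodes:
            raise LookupError("No minions selected for '{0}'".format(tgt))
        if len(nodes) > 1:
            raise LookupError("Too many minions for '{0}': {1}"
                              .format(tgt, nodes))
        return result[0][nodes[0]]

    fake = [{'cfg01.test.local': 'ctl01*'}]
    print(get_single_pillar(fake, 'I@salt:master'))  # ctl01*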
@@ -248,6 +266,10 @@
result = self.local(tgt=tgt, fun='service.stop', args=service)
return result['return']
+ def cmd_run(self, tgt, cmd):
+ result = self.local(tgt=tgt, fun='cmd.run', args=cmd)
+ return result['return']
+
@utils.retry(3, exception=libpepper.PepperException)
def sync_time(self, tgt='*'):
LOG.info("NTP time sync on the salt minions '{0}'".format(tgt))
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 66f686b..cf1fb2b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -483,23 +483,43 @@
timeout=600)
# create target dir for archives
- master.check_call("mkdir /root/dump/")
+ master.check_call("mkdir -p /root/dump/")
+
+ saltkeys_res = master.check_call(
+ "salt-key --list all --out=yaml", verbose=True)
+
+ saltkeys_all = yaml.safe_load(saltkeys_res.stdout_str)
+ minions = saltkeys_all['minions']
+
+ # add nodes registered in self.config_ssh,
+ # to get logs from nodes without salt minions
+ for node in self.config_ssh:
+ # If there is no any minion which name starts
+ # with the same hostname as node['node_name']
+ if not any(minion.startswith(node['node_name'])
+ for minion in minions):
+ # Use IP address from node['host'] to access the node
+ # because the cfg01 node may not know its hostname.
+ # Note: SSH public key from system.openssh.server.team.lab
+ # should already be configured on that node
+ # in order to access the node from cfg01
+ minions.append(str(node['host']))
# get archived artifacts to the master node
- for node in self.config_ssh:
- LOG.info("Getting archived artifacts from the node {0}"
- .format(node['node_name']))
+ for minion in minions:
+ LOG.info("Getting archived artifacts from the minion {0}"
+ .format(minion))
master.check_call("rsync -aruv {0}:/root/*.tar.gz "
- "/root/dump/".format(node['node_name']),
+ "/root/dump/".format(minion.strip()),
raise_on_err=False,
timeout=120)
- destination_name = '/root/{0}_dump.tar.gz'.format(artifact_name)
- # Archive the artifacts from all nodes
+ destination_name = '/tmp/{0}_dump.tar.gz'.format(artifact_name)
+ # Archive the artifacts from all minions
master.check_call(
'cd /root/dump/;'
'tar --absolute-names --warning=no-file-changed -czf '
- ' {0} ./'.format(destination_name))
+ ' {0} ./'.format(destination_name), verbose=True)
# Download the artifact to the host
LOG.info("Downloading the artifact {0}".format(destination_name))
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index fca6a6d..e1fa137 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -64,6 +64,9 @@
DOCKER_REGISTRY = os.environ.get('DOCKER_REGISTRY',
'docker-prod-local.artifactory.mirantis.com')
+BINARY_REGISTRY = os.environ.get('BINARY_REGISTRY', 'https://'
+ 'docker-prod-local.artifactory.mirantis.com/'
+ 'artifactory/binary-prod-local')
DOCKER_NAME = os.environ.get('DOCKER_NAME',
'mirantis/oscore/rally-tempest:latest')
DOCKER_IMAGES_SL_TAG = os.environ.get('DOCKER_IMAGES_SL_TAG', 'latest')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 0a447d6..4b1f14a 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -325,24 +325,51 @@
ct.Cfg('kubernetes_admin_password', ct.String(),
default='sbPfel23ZigJF3Bm'),
ct.Cfg('kubernetes_docker_package', ct.String(), default=''),
- ct.Cfg('kubernetes_hyperkube_image', ct.String(),
- default='{}/mirantis/kubernetes/hyperkube-amd64:v1.11.3-2'.format(
- settings.DOCKER_REGISTRY)),
+ ct.Cfg('kubernetes_hyperkube_source', ct.String(),
+ default='{}/mirantis/kubernetes/hyperkube-binaries/'
+ 'hyperkube_v1.12.3-2_1544133573591'.format(
+ settings.BINARY_REGISTRY)),
+ ct.Cfg('kubernetes_hyperkube_source_hash', ct.String(),
+ default='md5=fc23eaf3ba63d9ed9d141f465f584012'),
ct.Cfg('kubernetes_pause_image', ct.String(),
- default='{}/mirantis/kubernetes/pause-amd64:v1.11.3-2'.format(
+ default='{}/mirantis/kubernetes/pause-amd64:v1.12.3-2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/node:v3.1.3'.format(
+ default='{}/mirantis/projectcalico/calico/node:v3.3.2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_calicoctl_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/ctl:v3.1.3'.format(
+ default='{}/mirantis/projectcalico/calico/ctl:v3.3.2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_cni_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/cni:v3.1.3'.format(
+ default='{}/mirantis/projectcalico/calico/cni:v3.3.2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_kube_controllers_image', ct.String(),
default='{}/mirantis/projectcalico/calico/kube-controllers:'
- 'v3.1.3'.format(settings.DOCKER_REGISTRY)),
+ 'v3.3.2'.format(settings.DOCKER_REGISTRY)),
+ ct.Cfg('kubernetes_calico_calicoctl_source', ct.String(),
+ default='{}/mirantis/projectcalico/calicoctl/'
+ 'calicoctl-v3.3.2'.format(
+ settings.BINARY_REGISTRY)),
+ ct.Cfg('kubernetes_calico_calicoctl_source_hash', ct.String(),
+ default='md5=bb38517fdd6b8bb7c130ae7550a9d335'),
+ ct.Cfg('kubernetes_calico_birdcl_source', ct.String(),
+ default='{}/mirantis/projectcalico/bird/'
+ 'birdcl-v0.3.3'.format(
+ settings.BINARY_REGISTRY)),
+ ct.Cfg('kubernetes_calico_birdcl_source_hash', ct.String(),
+ default='md5=0327442efd2592ddce449b66c5d0fc9d'),
+ ct.Cfg('kubernetes_calico_cni_source', ct.String(),
+ default='{}/mirantis/projectcalico/cni-plugin/'
+ 'calico-v3.3.2'.format(
+ settings.BINARY_REGISTRY)),
+ ct.Cfg('kubernetes_calico_cni_source_hash', ct.String(),
+ default='md5=2544bc1865c1451cac7a61264c25a2cb'),
+ ct.Cfg('kubernetes_calico_cni_ipam_source', ct.String(),
+ default='{}/mirantis/projectcalico/cni-plugin/'
+ 'calico-ipam-v3.3.2'.format(
+ settings.BINARY_REGISTRY)),
+ ct.Cfg('kubernetes_calico_cni_ipam_source_hash', ct.String(),
+ default='md5=b22623eeea3b29ba8ec071d859ac7055'),
ct.Cfg('kubernetes_netchecker_enabled', ct.Boolean(),
help="", default=True),
ct.Cfg('kubernetes_netchecker_agent_image', ct.String(),
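The kubernetes components move from registry images to pinned binary URLs, each guarded by a salt-style "<algo>=<digest>" source hash. Verifying such a pin is a plain checksum comparison (a sketch; the commented digest is the hyperkube default from above):

    import hashlib

    def check_source_hash(payload, source_hash):
        # salt's source_hash convention: '<algo>=<hexdigest>'
        algo, expected = source_hash.split('=', 1)
        return hashlib.new(algo, payload).hexdigest() == expected

    # With the downloaded binary in `data`:
    # check_source_hash(data, 'md5=fc23eaf3ba63d9ed9d141f465f584012')
    print(check_source_hash(b'example',
                            'md5=' + hashlib.md5(b'example').hexdigest()))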
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
index cdab801..4e7082a 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
@@ -3,7 +3,6 @@
{% import 'cookied-bm-contrail-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-contrail-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail-maas') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
@@ -19,7 +18,6 @@
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
template:
devops_settings:
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml
index 3e3ee04..0b689aa 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml
@@ -3,7 +3,6 @@
{% import 'cookied-bm-contrail-nfv-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-contrail-nfv-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail-nfv-maas') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
@@ -19,7 +18,6 @@
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
template:
devops_settings:
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
index 34c254d..ac23ec1 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
@@ -19,4 +19,4 @@
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
index 7e21691..736a356 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
@@ -54,7 +54,7 @@
role: single_mgm
deploy_address: 172.16.49.73
enp5s0f0:
- role: single_contrail_dpdk_prv
+ role: single_contrail_dpdk_vlan_prv
tenant_address: 192.168.0.101
dpdk_pci: "'0000:05:00.0'"
dpdk_mac: '90:e2:ba:19:c2:18'
@@ -74,7 +74,7 @@
role: single_mgm
deploy_address: 172.16.49.74
enp5s0f0:
- role: single_contrail_dpdk_prv
+ role: single_contrail_dpdk_vlan_prv
tenant_address: 192.168.0.102
dpdk_pci: "'0000:05:00.0'"
dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
index 6c838cb..4eae932 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
@@ -1,8 +1,10 @@
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
{% set PATTERN = os_env('PATTERN', 'false') %}
{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
@@ -28,34 +30,86 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
# install contrail
-- description: Install docker for Opencontrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls docker.host
+
+- description: Install Docker services
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
+ fi; sleep 10;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 20}
skip_fail: false
-- description: Install Opencontrail requirements
+- description: Install opencontrail database services on first minion
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail exclude=opencontrail.client
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install opencontrail database services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 20}
skip_fail: false
- description: Spawn Opencontrail docker images
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls docker.client && sleep 15;
+ -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
- description: Finalize opencontrail services
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.client
+ -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 30}
skip_fail: false
+- description: Finalize opencontrail client on non-compute nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Apply opencontrail on computes (excluding client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: true
+
- description: Check contrail status
cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
@@ -63,132 +117,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install Opencontrail client on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on ctl
- cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
- description: Reboot computes
cmd: |
salt "cmp*" system.reboot;
@@ -197,9 +125,21 @@
retry: {count: 1, delay: 30}
skip_fail: true
-- description: Finalize contrail
+- description: Remove crashes files from /var/crashes/ while vrouter was crashed
+ cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Apply Opencontrail client on computes
cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Apply Opencontrail state on computes
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
+ node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -211,10 +151,175 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: WR for having ability to use hostnames
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
- CTL01_ADDRESS=`salt --out=newline_values_only 'ctl01*' network.interface_ip ens2`;
- echo "$CTL01_ADDRESS ctl01.{{ DOMAIN_NAME }} ctl01" >> /etc/hosts;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create heat-net before creating the external net
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create heat-net'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create public network for contrail
+ cmd: |
+ salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Create heat-subnet for heat-net
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create heat-router'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set public gateway on heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Attach heat-subnet to heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Fix default security group for access to external net from outside
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: true
+
+# Prepare and run tempest (runtest)
+
+- description: Upload tempest template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: runtest.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Include class with tempest template into cfg node
+ cmd: |
+ sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ salt '*' saltutil.refresh_pillar;
+ salt '*' saltutil.sync_all;
+ salt 'ctl01*' pkg.install docker.io;
+ salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+ salt 'cfg01*' state.sls salt.minion && sleep 20;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Enforce keystone client
+ cmd: |
+ salt 'cfg01*' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create DPDK test flavors via nova client
+ cmd: |
+ . /root/keystonercv3; nova flavor-create m1.extra_tiny_test 998 1024 5 1;
+ nova flavor-create m1.tiny_test 999 1024 5 1;
+ nova flavor-key m1.extra_tiny_test set hw:mem_page_size=1GB;
+ nova flavor-key m1.tiny_test set hw:mem_page_size=1GB;
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image
+ cmd: |
+ salt 'cfg01*' state.sls glance.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate tempest config
+ cmd: |
+ salt 'cfg01*' state.sls runtest;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Download cirros image for runtest
+ cmd: |
+ wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Tune tempest.conf for contrail (trial settings)
+ cmd: |
+ apt install crudini jq -y;
+ crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
+ crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
+ crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
+ cat /tmp/test/tempest.conf;
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Wait for the tempest container to finish and produce a report
+ cmd: |
+ report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
+ if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
+ then echo "All done!"; docker logs run-tempest-yml;
+ elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
+ then echo "Exit without report!"; docker logs run-tempest-yml;
+ else echo "Tempest not finished... ";sleep 900; false;
+ fi
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 25, delay: 30}
+ skip_fail: false
+
+- description: Download xml results
+ download:
+ remote_path: /tmp/test/
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_CTL01 }}
+ skip_fail: true
\ No newline at end of file
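The "Wait for the tempest container" step above polls `docker inspect` until the run-tempest-yml container reports `exited`, then distinguishes a clean finish (report file present) from an exit without a report; the retry block gives it up to 25 attempts, 30 seconds apart, with an extra 900-second sleep per failed attempt. A minimal standalone sketch of the same polling pattern, assuming the container name and report directory used above:

    #!/usr/bin/env bash
    # Poll the tempest container until it exits, then look for a report file.
    for attempt in $(seq 1 25); do
        status=$(docker inspect run-tempest-yml | jq -rM '.[]."State"."Status"')
        if [ "$status" = "exited" ]; then
            docker logs run-tempest-yml
            report_file=$(find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit)
            [ -f "$report_file" ] && echo "All done!" || echo "Exit without report!"
            exit 0
        fi
        echo "Tempest not finished (attempt ${attempt}/25)..."
        sleep 30
    done
    exit 1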
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/runtest.yml b/tcp_tests/templates/cookied-bm-contrail40-nfv/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+ _param:
+ glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+ openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+ openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+ openstack_public_neutron_subnet_gateway: 192.168.200.1
+ runtest_tempest_cfg_dir: /tmp/test
+ runtest_tempest_cfg_name: tempest.conf
+ runtest_tempest_public_net: public
+ tempest_test_target: ctl01*
+ neutron:
+ client:
+ enabled: true
+ runtest:
+ enabled: true
+ keystonerc_node: ctl01*
+ tempest:
+ DEFAULT:
+ log_file: tempest.log
+ cfg_dir: ${_param:runtest_tempest_cfg_dir}
+ cfg_name: ${_param:runtest_tempest_cfg_name}
+ compute:
+ min_compute_nodes: 2
+ convert_to_uuid:
+ network:
+ public_network_id: ${_param:runtest_tempest_public_net}
+ enabled: true
+ heat_plugin:
+ build_timeout: '600'
+ put_keystone_rc_enabled: false
+ put_local_image_file_enabled: false
+ share:
+ capability_snapshot_support: true
+ run_driver_assisted_migration_tests: false
+ run_manage_unmanage_snapshot_tests: false
+ run_manage_unmanage_tests: false
+ run_migration_with_preserve_snapshots_tests: false
+ run_quota_tests: true
+ run_replication_tests: false
+ run_snapshot_tests: true
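This runtest.yml wires the tempest settings into the reclass model: keys under `_param` are interpolated into the `runtest:tempest` pillar through `${_param:...}` references, and the `state.sls runtest` step renders them into /tmp/test/tempest.conf for the target set by `tempest_test_target`. A quick spot-check of the rendered file from ctl01, a sketch assuming crudini is installed as in the openstack.yaml steps above:

    # Verify a few values the runtest state should have rendered into tempest.conf.
    crudini --get /tmp/test/tempest.conf DEFAULT log_file           # expect: tempest.log
    crudini --get /tmp/test/tempest.conf compute min_compute_nodes  # expect: 2
    crudini --get /tmp/test/tempest.conf heat_plugin build_timeout  # expect: 600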
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
index 50da2d4..7825f00 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
@@ -30,7 +30,7 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
bmk_enabled: 'False'
ceph_enabled: 'False'
- opencontrail_compute_iface: enp5s0f0
+ opencontrail_compute_iface: enp5s0f0.${_param:tenant_vlan}
openstack_nfv_dpdk_enabled: 'True'
openstack_nfv_sriov_enabled: 'True'
openstack_nfv_sriov_network: physnet1
@@ -97,8 +97,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -150,7 +150,7 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
@@ -242,7 +242,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
index b130ae8..8372b67 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
@@ -30,7 +30,7 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
bmk_enabled: 'False'
ceph_enabled: 'False'
- opencontrail_compute_iface: enp5s0f0
+ opencontrail_compute_iface: enp5s0f0.${_param:tenant_vlan}
openstack_nfv_dpdk_enabled: 'True'
openstack_nfv_sriov_enabled: 'True'
openstack_nfv_sriov_network: physnet1
@@ -97,8 +97,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -150,7 +150,7 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
@@ -242,7 +242,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
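The opencontrail_compute_iface change above moves vRouter from the raw NIC onto a VLAN subinterface: with the tenant_vlan value here, `enp5s0f0.${_param:tenant_vlan}` resolves to enp5s0f0.2423. For orientation, the dotted name denotes the kind of 802.1Q subinterface that iproute2 would create manually like this (an illustration only; the deployment tooling sets the interface up itself):

    # Manual equivalent of the enp5s0f0.2423 VLAN subinterface (illustration).
    ip link add link enp5s0f0 name enp5s0f0.2423 type vlan id 2423
    ip link set dev enp5s0f0.2423 up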
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index e59fdf8..594f46d 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -32,23 +32,13 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/nodes.yml;
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "Change path to internal storage for salt.control images"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
index df0fbae..5810c71 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
@@ -1,266 +1,15 @@
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-#Launch containers
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 4, delay: 40}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 4, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
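This rewrite replaces the inline StackLight step list with the shared Jinja macros from shared-sl.yaml and shared-sl-tests.yaml; the rendered steps are meant to be equivalent to the removed ones. After the macros run, the swarm can still be spot-checked with the same commands the removed steps used (a sketch reusing their targets; run from cfg01):

    # Verify swarm membership and that the prometheus containers came up.
    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@docker:swarm and I@prometheus:server' cmd.run 'docker ps'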
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
index 106c3d5..ba69177 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -97,3 +100,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
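The two new runcmd entries grow the LVM layout only on images that actually carry the vg0 volume group: pvresize extends the physical volume on /dev/vda3 once growpart (configured above) has enlarged the partition, and growlvm.py redistributes the space according to the image-layout.yml written to disk here. A standalone sketch of the same guard with explicit error reporting, using the paths from these entries:

    #!/usr/bin/env bash
    # Grow LVM only when the image uses the vg0 volume group.
    if lvs vg0 >/dev/null 2>&1; then
        pvresize /dev/vda3 || echo "pvresize /dev/vda3 failed" >&2
        /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml \
            || echo "growlvm.py failed" >&2
    else
        echo "vg0 not present; skipping LVM grow"
    fi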
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
index 915981e..bdcd21d 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -93,3 +96,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
index e99cee9..ed36fad 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
@@ -12,6 +12,7 @@
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
index ec83dc7..e9f7c0b 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
@@ -1,8 +1,10 @@
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
{% set PATTERN = os_env('PATTERN', 'false') %}
{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
@@ -28,39 +30,84 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
# install contrail
-- description: Install docker for Opencontrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls docker.host
+
+- description: Install Docker services
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
+ fi; sleep 10;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 20}
skip_fail: false
-- description: Install Opencontrail requirements
+- description: Install opencontrail database services on first minion
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail exclude=opencontrail.client
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install opencontrail database services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 20}
skip_fail: false
- description: Spawn Opencontrail docker images
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls docker.client && sleep 15;
+ -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
- description: Finalize opencontrail services
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.client
+ -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 30}
- skip_fail: true
+ skip_fail: false
-- description: Highstate analytics
+- description: Finalize opencontrail services on non-compute clients
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'nal*' state.sls highstate && sleep 15;
+ -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Apply Opencontrail on computes (excluding client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
skip_fail: true
- description: Check contrail status
@@ -70,132 +117,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install Opencontrail client on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on ctl
- cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
- description: Reboot computes
cmd: |
salt "cmp*" system.reboot;
@@ -204,9 +125,21 @@
retry: {count: 1, delay: 30}
skip_fail: true
-- description: Finalize contrail
+- description: Remove crashes files from /var/crashes/ while vrouter was crashed
+ cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Apply Opencontrail client on computes
cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Apply Opencontrail states on computes
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
+ node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -218,10 +151,172 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: WR for having ability to use hostnames
- cmd: |
- CTL01_ADDRESS=`salt --out=newline_values_only 'ctl01*' network.interface_ip ens2`;
- echo "$CTL01_ADDRESS ctl01.{{ DOMAIN_NAME }} ctl01" >> /etc/hosts;
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create heat-net before creating the external network
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create heat-net'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create public network for contrail
+ cmd: |
+ salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Create heat-subnet on heat-net (neutron client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create heat-router (neutron client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create heat-router'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set heat-router gateway to the public network (neutron client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Attach heat-subnet to heat-router (neutron client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Fix default security group for access to external net from outside
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+# Start preparing runtest
+
+- description: Upload tempest template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: runtest.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Include the tempest runtest class on the cfg node
+ cmd: |
+ sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ salt '*' saltutil.refresh_pillar;
+ salt '*' saltutil.sync_all;
+ salt 'ctl01*' pkg.install docker.io;
+ salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+ salt 'cfg01*' state.sls salt.minion && sleep 20;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Enforce keystone client
+ cmd: |
+ salt 'cfg01*' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create flavors for tests
+ cmd: |
+ salt 'cfg01*' state.sls nova.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image
+ cmd: |
+ salt 'cfg01*' state.sls glance.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate tempest config
+ cmd: |
+ salt 'cfg01*' state.sls runtest;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Download cirros image for runtest
+ cmd: |
+ wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Adjust tempest.conf for Contrail (tempest roles, Patrole policy, SDN service)
+ cmd: |
+ apt install crudini jq -y;
+ crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
+ crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
+ crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
+ cat /tmp/test/tempest.conf;
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Wait for the tempest container to exit and verify the report
+ cmd: |
+ report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
+ if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
+ then echo "All done!"; docker logs run-tempest-yml;
+ elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
+ then echo "Exit without report!"; docker logs run-tempest-yml;
+ else echo "Tempest not finished... ";sleep 900; false;
+ fi
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 25, delay: 30}
+ skip_fail: false
+
+- description: Download xml results
+ download:
+ remote_path: /tmp/test/
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_CTL01 }}
skip_fail: true
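The security-group step above deletes every existing rule before recreating permissive TCP/ICMP rules in both directions, which keeps re-runs from failing on duplicate rules. One caveat: if the rule list is ever empty, `xargs` would still invoke the delete command with no arguments; with GNU xargs the -r flag skips that case. A slightly hardened variant of the same commands, run on ctl01 (a sketch, not the pipeline's own step):

    . /root/keystonercv3
    openstack security group rule list --column ID -f value \
        | xargs -r openstack security group rule delete
    for direction in egress ingress; do
        for proto in tcp icmp; do
            openstack security group rule create default --${direction} --protocol ${proto}
        done
    done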
diff --git a/tcp_tests/templates/cookied-bm-contrail40/runtest.yml b/tcp_tests/templates/cookied-bm-contrail40/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+ _param:
+ glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+ openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+ openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+ openstack_public_neutron_subnet_gateway: 192.168.200.1
+ runtest_tempest_cfg_dir: /tmp/test
+ runtest_tempest_cfg_name: tempest.conf
+ runtest_tempest_public_net: public
+ tempest_test_target: ctl01*
+ neutron:
+ client:
+ enabled: true
+ runtest:
+ enabled: true
+ keystonerc_node: ctl01*
+ tempest:
+ DEFAULT:
+ log_file: tempest.log
+ cfg_dir: ${_param:runtest_tempest_cfg_dir}
+ cfg_name: ${_param:runtest_tempest_cfg_name}
+ compute:
+ min_compute_nodes: 2
+ convert_to_uuid:
+ network:
+ public_network_id: ${_param:runtest_tempest_public_net}
+ enabled: true
+ heat_plugin:
+ build_timeout: '600'
+ put_keystone_rc_enabled: false
+ put_local_image_file_enabled: false
+ share:
+ capability_snapshot_support: true
+ run_driver_assisted_migration_tests: false
+ run_manage_unmanage_snapshot_tests: false
+ run_manage_unmanage_tests: false
+ run_migration_with_preserve_snapshots_tests: false
+ run_quota_tests: true
+ run_replication_tests: false
+ run_snapshot_tests: true
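This runtest.yml is identical to the cookied-bm-contrail40-nfv copy above, so the same `${_param:...}` interpolation applies. Whether the class actually took effect after the refresh_pillar/sync_all step can be checked through the pillar before generating the config (a sketch; the pillar paths follow the keys defined here):

    # Confirm the runtest pillar resolved on cfg01.
    salt 'cfg01*' pillar.get _param:runtest_tempest_cfg_dir            # expect: /tmp/test
    salt 'cfg01*' pillar.get runtest:tempest:compute:min_compute_nodes # expect: 2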
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
index db9b61b..e7dcb16 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
@@ -87,8 +87,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -140,7 +140,7 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
@@ -236,7 +236,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
index 4a9dc13..50eabbe 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
@@ -87,8 +87,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -140,7 +140,7 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
@@ -236,7 +236,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index 3542e9b..aa4f4d7 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -32,7 +32,7 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/nodes.yml;
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
index 49cf6a8..a5495cd 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
@@ -1,266 +1,15 @@
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-#Launch containers
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 4, delay: 40}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 4, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
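The macro calls above pull the StackLight installation steps from shared-sl.yaml instead of repeating them inline. A minimal sketch of the kind of macro these calls presumably expand to, assuming shared-sl.yaml wraps the same step schema (description/cmd/node_name/retry/skip_fail) used by the inline lists it replaces; the step bodies below mirror the inline docker swarm steps removed elsewhere in this change:

    {% macro MACRO_INSTALL_DOCKER_SWARM() %}
    {# Configure the docker host, then bring up swarm on the master,
       as in the inline step lists this macro replaces #}
    - description: Configure docker service
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false

    - description: Install docker swarm on master node
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false
    {% endmacro %}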
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml
index 106c3d5..ba69177 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -97,3 +100,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
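Taken together, the new runcmd entries and the image-layout.yml write-out implement disk growth for LVM-based images: cloud-init's growpart module extends /dev/vda3, pvresize exposes the new space to LVM, and growlvm.py redistributes it across the logical volumes according to the percentages in the layout file. A rough manual equivalent, assuming the image's volume group is vg0 (the LV name below is illustrative):

    #!/bin/sh
    # Sketch of the disk-growth sequence; LV name is an assumption.
    growpart /dev/vda 3                  # cloud-init growpart: extend partition 3 to fill the disk
    if lvs vg0 >/dev/null 2>&1; then     # only on LVM-based images
        pvresize /dev/vda3               # let LVM see the enlarged partition
        lvextend -l 30%VG /dev/vg0/root  # growlvm.py applies the '30%VG' target from image-layout.yml
        resize2fs /dev/vg0/root          # grow the filesystem to match the LV
    fi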
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml
index 915981e..bdcd21d 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -93,3 +96,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml
index 409842b..ba82288 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml
@@ -12,6 +12,7 @@
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
index 07a6936..44ae1f5 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -98,3 +101,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
index 9168b7f..b39b37a 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -58,4 +61,27 @@
auto lo
iface lo inet loopback
auto {interface_name}
- iface {interface_name} inet dhcp
\ No newline at end of file
+ iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
index 084a922..53c19d1 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
@@ -118,7 +118,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
boot:
- hd
cloud_init_volume_name: iso
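The !os_env tag, as used throughout these templates, resolves to the named environment variable when it is set and falls back to the literal after the comma, so the salt master now defaults to 12 GiB of memory but stays overridable per run. For example, to give it more without editing the template:

    export SLAVE_NODE_MEMORY=16384   # picked up by !os_env at render time instead of the 12288 default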
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
index ddbd762..dd34ede 100644
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
@@ -28,6 +28,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -102,3 +105,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
index 89b0da7..51fbc96 100644
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -93,3 +96,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
index 983a026..4983612 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -98,3 +101,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
index 9168b7f..b39b37a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -58,4 +61,27 @@
auto lo
iface lo inet loopback
auto {interface_name}
- iface {interface_name} inet dhcp
\ No newline at end of file
+ iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
index b1e37c6..14e7c37 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
@@ -12,4 +12,4 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
index e66753d..a3640c8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
@@ -56,7 +56,7 @@
role: single_mgm
deploy_address: 172.16.49.73
enp5s0f0:
- role: single_contrail_dpdk_prv
+ role: single_contrail_dpdk_vlan_prv
tenant_address: 192.168.0.101
dpdk_pci: "'0000:05:00.0'"
dpdk_mac: '90:e2:ba:19:c2:18'
@@ -75,7 +75,7 @@
role: single_mgm
deploy_address: 172.16.49.74
enp5s0f0:
- role: single_contrail_dpdk_prv
+ role: single_contrail_dpdk_vlan_prv
tenant_address: 192.168.0.102
dpdk_pci: "'0000:05:00.0'"
dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
index d97665e..875aace 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
@@ -1,5 +1,7 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set PATTERN = os_env('PATTERN', 'false') %}
@@ -9,7 +11,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -162,20 +164,13 @@
retry: {count: 10, delay: 30}
skip_fail: false
- # Upload cirros image
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+- description: Reboot computes
+ cmd: |
+ salt "cmp*" system.reboot;
+ sleep 600;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
+ skip_fail: true
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
@@ -184,18 +179,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on ctl
- cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
@@ -203,10 +186,155 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Reboot computes
+- description: Create heat-net before creating the external net
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create heat-net'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create public network for contrail
cmd: |
- salt "cmp*" system.reboot;
- sleep 600;
+ salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Create heat-subnet on heat-net via neutron client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create heat-router via neutron client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create heat-router'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set public net as gateway for heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Attach heat-subnet to heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Fix default security group for access to external net from outside
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: true
+
+# Starting prepare runtest
+
+- description: Upload tempest template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: runtest.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Include class with tempest template into cfg node
+ cmd: |
+ sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ salt '*' saltutil.refresh_pillar;
+ salt '*' saltutil.sync_all;
+ salt 'ctl01*' pkg.install docker.io;
+ salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+ salt 'cfg01*' state.sls salt.minion && sleep 20;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Enforce keystone client
+ cmd: |
+ salt 'cfg01*' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create dpdk test flavors (1GB hugepages) via nova client
+ cmd: |
+ . /root/keystonercv3; nova flavor-create m1.extra_tiny_test 998 1024 5 1;
+ nova flavor-create m1.tiny_test 999 1024 5 1;
+ nova flavor-key m1.extra_tiny_test set hw:mem_page_size=1GB;
+ nova flavor-key m1.tiny_test set hw:mem_page_size=1GB;
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image
+ cmd: |
+ salt 'cfg01*' state.sls glance.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate tempest config
+ cmd: |
+ salt 'cfg01*' state.sls runtest;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Download cirros image for runtest
+ cmd: |
+ wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set contrail-specific tempest options (experimental)
+ cmd: |
+ apt install crudini jq -y;
+ crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
+ crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
+ crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
+ cat /tmp/test/tempest.conf;
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Wait for tempest container to finish
+ cmd: |
+ report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
+ if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
+ then echo "All done!"; docker logs run-tempest-yml;
+ elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
+ then echo "Exit without report!"; docker logs run-tempest-yml;
+ else echo "Tempest not finished... ";sleep 900; false;
+ fi
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 25, delay: 30}
+ skip_fail: false
+
+- description: Download xml results
+ download:
+ remote_path: /tmp/test/
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_CTL01 }}
+ skip_fail: true
\ No newline at end of file
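The "Wait for tempest container to finish" step leans on the harness retry mechanism: each attempt inspects the container and deliberately exits non-zero while tempest is still running, so the step is re-run up to 25 times. The same logic, extracted here as a standalone script for readability (docker and jq assumed present, as installed in the earlier steps):

    #!/bin/bash
    # Poll the run-tempest-yml container; a non-zero exit makes the harness retry.
    status=$(docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"')
    report_file=$(find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit)
    if [ "$status" = "exited" ] && [ -f "$report_file" ]; then
        echo "All done!"; docker logs run-tempest-yml            # finished and produced a report
    elif [ "$status" = "exited" ] && [ ! -f "$report_file" ]; then
        echo "Exit without report!"; docker logs run-tempest-yml
    else
        echo "Tempest not finished..."; sleep 900; false         # still running: wait, then fail to trigger a retry
    fi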
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/runtest.yml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+ _param:
+ glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+ openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+ openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+ openstack_public_neutron_subnet_gateway: 192.168.200.1
+ runtest_tempest_cfg_dir: /tmp/test
+ runtest_tempest_cfg_name: tempest.conf
+ runtest_tempest_public_net: public
+ tempest_test_target: ctl01*
+ neutron:
+ client:
+ enabled: true
+ runtest:
+ enabled: true
+ keystonerc_node: ctl01*
+ tempest:
+ DEFAULT:
+ log_file: tempest.log
+ cfg_dir: ${_param:runtest_tempest_cfg_dir}
+ cfg_name: ${_param:runtest_tempest_cfg_name}
+ compute:
+ min_compute_nodes: 2
+ convert_to_uuid:
+ network:
+ public_network_id: ${_param:runtest_tempest_public_net}
+ enabled: true
+ heat_plugin:
+ build_timeout: '600'
+ put_keystone_rc_enabled: false
+ put_local_image_file_enabled: false
+ share:
+ capability_snapshot_support: true
+ run_driver_assisted_migration_tests: false
+ run_manage_unmanage_snapshot_tests: false
+ run_manage_unmanage_tests: false
+ run_migration_with_preserve_snapshots_tests: false
+ run_quota_tests: true
+ run_replication_tests: false
+ run_snapshot_tests: true
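Within this pillar, ${_param:...} references are interpolated by reclass when the pillar is compiled, so the tempest section receives concrete values. A hedged sketch of how the interpolated keys render, assuming standard reclass behaviour (the UUID substitution under convert_to_uuid is presumably performed later by the runtest formula, inferred from the key name):

    tempest:
      DEFAULT:
        cfg_dir: /tmp/test        # from ${_param:runtest_tempest_cfg_dir}
        cfg_name: tempest.conf    # from ${_param:runtest_tempest_cfg_name}
      convert_to_uuid:
        network:
          public_network_id: public   # network name here; resolved to the real UUID at runtime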
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
index 6e0fee1..bfd683a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -72,6 +72,8 @@
mcp_common_scripts_branch: ''
mcp_version: proposed
offline_deployment: 'False'
+ opencontrail_version: 3.0
+ linux_repo_contrail_component: oc32
opencontrail_analytics_address: 10.167.8.30
opencontrail_analytics_hostname: nal
opencontrail_analytics_node01_address: 10.167.8.31
@@ -80,6 +82,7 @@
opencontrail_analytics_node02_hostname: nal02
opencontrail_analytics_node03_address: 10.167.8.33
opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface: enp5s0f0.${_param:tenant_vlan}
opencontrail_compute_iface_mask: '24'
opencontrail_control_address: 10.167.8.20
opencontrail_control_hostname: ntw
@@ -90,7 +93,7 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.101
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.102
opencontrail_router02_hostname: rtr02
@@ -192,7 +195,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
index 77980d0..9332875 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -33,19 +33,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Change path to internal storage for salt.control images"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
index 5ba2e3f..c25aab3 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
@@ -1,245 +1,15 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install glusterfs client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:client' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-# Install slv2 infra
-# Launch containers
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install Mongo cluster if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- ######################################
- ######################################
- ######################################
-
-- description: Collect Grains
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 60}
- skip_fail: false
-
-- description: Configure Alerta if it exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
index be97ae6..2f27c0f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
@@ -9,6 +9,7 @@
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
index 4dc3470..d837e26 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
@@ -1,4 +1,5 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
@@ -12,4 +13,4 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index fe01d30..b7e5829 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -1,5 +1,7 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set PATTERN = os_env('PATTERN', 'false') %}
@@ -9,7 +11,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -100,20 +102,7 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
# Install compute node
@@ -136,42 +125,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on ctl
- cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
@@ -179,7 +132,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-
- description: Hack vrouter (Delete default mount point)
cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
node_name: {{ HOSTNAME_CFG01 }}
@@ -205,3 +157,167 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: true
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create heat-net before creating the external net
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create heat-net'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create public network for contrail
+ cmd: |
+ salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Create heat-subnet on heat-net via neutron client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create heat-router via neutron client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create heat-router'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set public net as gateway for heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Attach heat-subnet to heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Fix default security group for access to external net from outside
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+# Starting prepare runtest
+
+- description: Upload tempest template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: runtest.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Include class with tempest template into cfg node
+ cmd: |
+ sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ salt '*' saltutil.refresh_pillar;
+ salt '*' saltutil.sync_all;
+ salt 'ctl01*' pkg.install docker.io;
+ salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+ salt 'cfg01*' state.sls salt.minion && sleep 20;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Enforce keystone client
+ cmd: |
+ salt 'cfg01*' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create flavors for tests
+ cmd: |
+ salt 'cfg01*' state.sls nova.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image
+ cmd: |
+ salt 'cfg01*' state.sls glance.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate tempest config
+ cmd: |
+ salt 'cfg01*' state.sls runtest;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Download cirros image for runtest
+ cmd: |
+ wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set contrail-specific tempest options (experimental)
+ cmd: |
+ apt install crudini jq -y;
+ crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
+ crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
+ crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
+ cat /tmp/test/tempest.conf;
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Wait for tempest container to finish
+ cmd: |
+ report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
+ if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
+ then echo "All done!"; docker logs run-tempest-yml;
+ elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
+ then echo "Exit without report!"; docker logs run-tempest-yml;
+ else echo "Tempest not finished... ";sleep 900; false;
+ fi
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 25, delay: 30}
+ skip_fail: false
+
+- description: Download xml results
+ download:
+ remote_path: /tmp/test/
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_CTL01 }}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/runtest.yml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+ _param:
+ glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+ openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+ openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+ openstack_public_neutron_subnet_gateway: 192.168.200.1
+ runtest_tempest_cfg_dir: /tmp/test
+ runtest_tempest_cfg_name: tempest.conf
+ runtest_tempest_public_net: public
+ tempest_test_target: ctl01*
+ neutron:
+ client:
+ enabled: true
+ runtest:
+ enabled: true
+ keystonerc_node: ctl01*
+ tempest:
+ DEFAULT:
+ log_file: tempest.log
+ cfg_dir: ${_param:runtest_tempest_cfg_dir}
+ cfg_name: ${_param:runtest_tempest_cfg_name}
+ compute:
+ min_compute_nodes: 2
+ convert_to_uuid:
+ network:
+ public_network_id: ${_param:runtest_tempest_public_net}
+ enabled: true
+ heat_plugin:
+ build_timeout: '600'
+ put_keystone_rc_enabled: false
+ put_local_image_file_enabled: false
+ share:
+ capability_snapshot_support: true
+ run_driver_assisted_migration_tests: false
+ run_manage_unmanage_snapshot_tests: false
+ run_manage_unmanage_tests: false
+ run_migration_with_preserve_snapshots_tests: false
+ run_quota_tests: true
+ run_replication_tests: false
+ run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index 476df0d..03e966e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -72,6 +72,8 @@
mcp_common_scripts_branch: ''
mcp_version: 2018.3.1
offline_deployment: 'False'
+ opencontrail_version: 3.0
+ linux_repo_contrail_component: oc32
opencontrail_analytics_address: 10.167.8.30
opencontrail_analytics_hostname: nal
opencontrail_analytics_node01_address: 10.167.8.31
@@ -90,7 +92,7 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
@@ -187,7 +189,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
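
Note: both 10.167.8.100 -> 10.167.8.220 and 192.168.0.1 -> 192.168.0.220 move addresses within their existing subnets. A hedged pre-check that the new address is free on the control network (hypothetical bridge name br_ctl; requires iputils-arping):

    # A reply means 10.167.8.220 is already claimed by another host.
    arping -c 3 -I br_ctl 10.167.8.220
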
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 24ee31f..1fa16cf 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -15,18 +15,13 @@
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
- description: "Workaround for rack01 compute generator"
@@ -34,19 +29,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Change path to internal storage for salt.control images"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -94,7 +79,6 @@
skip_fail: false
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
- description: Update minion information
@@ -175,9 +159,7 @@
skip_fail: false
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
- description: "Lab04 workaround: Give each node root acces with key from cfg01"
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 0a3867a..795f98a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -1,245 +1,14 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:client' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-# Install slv2 infra
-#Launch containers
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- ######################################
- ######################################
- ######################################
-
-- description: Collect Grains
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 60}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
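
Note: the inline stacklight steps removed above are assumed to be reproduced one-to-one by the shared-sl.yaml macros. A smoke check after deployment, reusing the command from the removed "Check docker ps" step:

    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@docker:swarm and I@prometheus:server' cmd.run 'docker ps'
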
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
index d520d62..3de4ae9 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -14,6 +14,7 @@
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
# {# set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) #}
# {# set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) #}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml
index 07a6936..44ae1f5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -98,3 +101,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
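
Note: the runcmd additions plus the growpart section above implement a grow-at-boot sequence. As a plain shell sketch (vg0 and growlvm.py are assumed to ship with the image):

    # growpart (cloud-init) has already extended /dev/vda3 to fill the disk.
    if lvs vg0; then
        pvresize /dev/vda3    # expose the new partition space to LVM
        /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml
    fi
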
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml
index 9168b7f..b39b37a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -58,4 +61,27 @@
auto lo
iface lo inet loopback
auto {interface_name}
- iface {interface_name} inet dhcp
\ No newline at end of file
+ iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
index 7dff4de..c75c6d5 100644
--- a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
@@ -213,6 +213,17 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Fix default security group for access to external net from outside
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
# Starting prepare runtest
- description: Upload tempest template
@@ -270,14 +281,38 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Run tempest from new docker image
+- description: Test future contrail manipulation
cmd: |
- OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
- docker run -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ apt install crudini jq -y;
+ crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
+ crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
+ crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
+ cat /tmp/test/tempest.conf;
node_name: {{ HOSTNAME_CTL01 }}
retry: {count: 1, delay: 30}
skip_fail: true
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Test Wait container script
+ cmd: |
+ report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
+ if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
+ then echo "All done!"; docker logs run-tempest-yml;
+ elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
+ then echo "Exit without report!"; docker logs run-tempest-yml;
+ else echo "Tempest not finished... ";sleep 900; false;
+ fi
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 25, delay: 30}
+ skip_fail: false
+
- description: Download xml results
download:
remote_path: /tmp/test/
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
index 106c3d5..ba69177 100644
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -97,3 +100,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
index 915981e..bdcd21d 100644
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -93,3 +96,26 @@
iface lo inet loopback
auto {interface_name}
iface {interface_name} inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
index eb56414..dad3faf 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -123,7 +123,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..d51f4b6
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/lab04-physical-inventory.yaml
@@ -0,0 +1,96 @@
+nodes:
+ cfg01.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: single_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual_vlan
+
+ kvm02.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: single_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual_vlan
+
+ kvm03.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: single_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual_vlan
+
+ ctl01.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: kubernetes_control_node01
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: single_vlan_ctl
+
+ ctl02.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: kubernetes_control_node02
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: single_vlan_ctl
+
+ ctl03.cookied-cicd-bm-k8s-contrail40-maas.local:
+ reclass_storage_name: kubernetes_control_node03
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: single_vlan_ctl
+
+ cmp<<count>>:
+ reclass_storage_name: kubernetes_compute_rack01
+ roles:
+ - kubernetes_compute_contrail
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ ens11f1:
+ role: k8s_oc40_only_vhost_on_control_vlan
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-cookiecutter-k8s-contrail.yaml
new file mode 100644
index 0000000..81dfa50
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-cookiecutter-k8s-contrail.yaml
@@ -0,0 +1,362 @@
+#https://docs.mirantis.com/mcp/master/mcp-ref-arch/opencontrail-plan/contrail-overview/contrail-4.html#
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAskZyhTySYWvGrp+dlv+q2La8oiM8Sv1JfQog8+UW28hGUbCq
+ PnWa7bzVqENEY+g+qbQYh2Pvb2xURyY9+02TWLmCYzq7+orO1eG2SDt384YzDc6B
+ nQohUbGwaSH2YsT/QA8KV1fvUW9P0gVEHmJWR8Jo3vdRyw+tEUszYkiTYkcVc9zt
+ O5eYT9a6gSjLhl+tmJXu38jdWTcE8zJP+OGpJRDJpV2EAYwv+LXde9REv4KCGMqN
+ lvli9IA+zarfMsAzSTPpL5ooslB20akKM1h5li3LG47OpKMG9sMVFGEEo7+lqTGa
+ zUJEUPbJU95PqLfxoIOjYF/MhWKU5VALLraVwwIDAQABAoIBAHUAj0CqlFej6G3L
+ DB6CBZrds8el4HhG6+hIsX/gprPwKVaL3+/GN7w35vHb1BLN5fJy5HZXPFmge1+G
+ 1b8NFttwRQbjEWRJnZ352Sxm+z60oOU61w4+C8gWGnWleJMyP2PHez3/1G1Z5MUt
+ 95sJZx8JlNJg9ymSTD/BXyPuBezFKf8jUSgPbhBv8B2yy82YGzqc9u7sK6BN90P1
+ 3ZcynQ4cfieZLoim56dF9YEixr8plGmGpOspPZFlVCGIc1y2BC4ZUyDatcCa7/gQ
+ 3aDdt9lkEfoCHezAFOmaZDCOZ70spkwCqXYk42BXpDjKF6djCXyx3WKVF+IhEOYT
+ /S1I8KECgYEA1tzUZgW43/Z7Sm+QnoK3R9hG2oZZOoiTDdHyC+f5dk6maNbJbivM
+ FAPIpzHtCyvBEiSgGmlqpUrwR2NbYnOHjOX72Yq7/e0Vl1XWmplKCsTDNFGXx5Fs
+ 9AQbWjOF+drgfZ5p3dNyE9689nJZg5EhTxL8dfwnZat/l+/OKFO2tM0CgYEA1GhW
+ 4FMsXe3/g7M0dj5FnbS8xjz93pei5YZanID9mY/RUcKbegdtfvtwsfkZe5trbI39
+ jv96QyJeAnf48UDFwCV6SSZF/8Ez0pYHZanwkwKsli5uZdvSz7rUyVwE6tyO24WA
+ Trgpmbb8uxhJHBNuD+bC/iGd1H0IUuJ65ChD9M8CgYEAxfp2z4boQZ2ZHw5LoHLr
+ tIyJROPUJgYgEfToZaYbC7MOzL1Un2pFwg92fPCY7dkkLraGu690r9esLOtVEhNH
+ zEFB3cJi1Gf3pBlZA9zJB8Ej6Pphs2bBkNqT5XpiMcZHYhhsjhQ+Iibz0NWuu3cn
+ zPe+nmx4VMtAZ1x0hl4UlOUCgYBh8NaWS2O60AIwrRI0r5QUGwGsevUojU0Mfr4L
+ SiMlir4e8BgW1ZP0qGYXel/4sfp/rJ1NMZojmJY2lzFPtOk6J03SqvY97s1BffJd
+ O1X1w5bnDPPUvd7f8CsryeVuklLBADbglWSBP3IbpyAW9RKb/HDPE5seHqUW6t11
+ lOd42wKBgBW0tTV6+aqao6o4ZBU0SVoNg9/PwgOubAzeSDW2+ArXn1sMmroSfXVw
+ fbUTJI5TF/1pd7A5AKoR1FfTqlssoYlMGEPI6HJ4n9/4SqLpwd99HFW0ISt+EUMh
+ Tqt9mDfKzwHxG2QTuOwyrslO8iTwRoN5OYgm4dsL471Obr4DILTz
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ auditd_enabled: 'False'
+ cicd_control_node01_address: 10.167.13.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.13.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.13.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.13.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpgIBAAKCAQEAxLQy4F7sNBloj0fFvklCq9+IX/BV5OBs6OtSBf6A+ztTs68i
+ ib5W6Tz/knh7wt2URB6uKJJBRBK+Oqj91ZNJxISewP2f5gX10WpjOAti+Fghkemt
+ kvyA8aUxX1wuAz7Y2v1uv1IkfWa5ubL8oJXNjeT9yeCNJWBxvd46XT9UiCs5CnDL
+ lBjRg+AP2+u5OabUFtH7GSzVqcMzhi0qLilP+cRhKmar2tQXFEI5wnwADh0REAF/
+ OxUZPaPEPD9TW7fGxjfrMtyUKqTEbi+EPsIvldkR0IhYrKXjwcFFu3FKISuy8PVM
+ EKUM5aZaLMI/WiMs1zmx+bAOrkCsUAf+sVmocQIDAQABAoIBAQCRnSAojrxmmQSZ
+ RLVy9wK+/zwSYXUEeMrG5iUTQOM0mCePVa/IrjnicYB0anKbv7IZd2jPqe1cuk9O
+ V3mJGH68Vry6+0XaX0EpJIuMmolKdNttC8Ktk/TUbciN4kxBpM2d14ybXvCaUGhe
+ usxfCGZhi0oAnxV9vNaWiqNEEjS+k4u9XTnj3+GxstEwch+l7xJwz83WEsx7G1Zz
+ 3Yxg7mh2uRPVCOZGVdClciym+9WHHrcdYw/OJCsSFsT7+qgzptsvXBVxa6EuGaVY
+ Pba+UfOnYIKlBtka4i3zXGaCQF6t2FHw5WyUEmYm3iBYmrGBbEf+3665Kh4NQs0a
+ PV4eHlLdAoGBAO8nDgkTA4gi1gyFy2YBUFP2BignkKCZGHkD8qvBnOt1Rxm6USlZ
+ 7GzAtU3nSd8ODzgOBI7+zd82yRqv2hEwP7xARhr0Nx1XvyaQtRlQ6tQnBgvqLDCG
+ n0qvWoBM+Yl6sTRGYavAMCaR7PuULUcZFNWk7m0fv4vqddGijgRsje37AoGBANKP
+ nN72BujsQIhdzAYS+u5/Hxu56Tvgupe6kWkhQuV8MQcM+79I6cgJxxH6zQDP/hGt
+ 3vXapgWUgi025LuEUWfkxAtTUfT4cRP2x529CH/XLQMYVqWxkoben9r+eFav+Kgw
+ C0dR3vSOlEMzYoIF+p/km0mIV1ZKZvrWymtXSdODAoGBAL4feUwDfqpKr4pzD1l/
+ r+Gf1BM2KQdTzp3eYpzjJiIWMTkl4wIRyCBJL5nIRvT6E2VH153qubY7srLxnFZP
+ 2kuJeXJSNkKwkHlTT3XZ22Zfw7HTL+BAFgDk2PjouPTvwlLBpUJKXr07A4CZs0kz
+ ilmybg340GmmMpY/OdIQjuDjAoGBAMcd5hP2hqxHlfMe+CwaUM+uySU4FvZ32xxW
+ 4uGPAarwWZC4V20Zr3JqxKUdDjYhersPOFQ4c129hajqSz2EsFLWRAoNvegx9QUT
+ Dsv9EgeK3Vca8f14wf7mkjbPA8++UyABZvkH1BZiqpQuCI66xrnjvnG4DBde/qlg
+ 60S84+SvAoGBAKH1feNtJaNhDxF0OqRuVmSFyL3pkMDoYr/mgpT4T1ToRBW5AtEt
+ Io4egi68ph8IChAt/TGFvikW7tbEgK9ACD/RAfl+LiuhxqJJFtC1LfGfHI7ntuRj
+ DjQrUy59ULoflh3iWBPtpw2ooRlSrAwaIgGt9odMECXp3BK8WLsUG9H1
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEtDLgXuw0GWiPR8W+SUKr34hf8FXk4Gzo61IF/oD7O1OzryKJvlbpPP+SeHvC3ZREHq4okkFEEr46qP3Vk0nEhJ7A/Z/mBfXRamM4C2L4WCGR6a2S/IDxpTFfXC4DPtja/W6/UiR9Zrm5svyglc2N5P3J4I0lYHG93jpdP1SIKzkKcMuUGNGD4A/b67k5ptQW0fsZLNWpwzOGLSouKU/5xGEqZqva1BcUQjnCfAAOHREQAX87FRk9o8Q8P1Nbt8bGN+sy3JQqpMRuL4Q+wi+V2RHQiFispePBwUW7cUohK7Lw9UwQpQzlploswj9aIyzXObH5sA6uQKxQB/6xWahx
+ cluster_domain: bm-mcp-k8s-contrail.local
+ cluster_name: bm-mcp-k8s-contrail
+ # compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: GAZbu3hguubkeYMg15AQ1J6UuY60TAh8h0EVpNUrHWhjRS2SxRYBuxxLvVURv82m
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.13.0/24
+ control_vlan: '2410'
+ backend_vlan: '2402'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.17.41.2
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.17.41.0/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: dtyzhnenko@mirantis.com
+ etcd_ssl: 'True'
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.13.241
+ infra_kvm01_deploy_address: 172.17.41.4
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.13.242
+ infra_kvm02_deploy_address: 172.17.41.5
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.13.243
+ infra_kvm03_deploy_address: 172.17.41.6
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.13.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'True'
+ kubernetes_compute_count: 2
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_single_address_ranges: 10.167.13.103-10.167.13.104
+ kubernetes_compute_tenant_address_ranges: 10.167.13.103-10.167.13.104
+ kubernetes_network_opencontrail_enabled: 'True'
+ kubernetes_keepalived_vip_interface: br_ctl
+ kubernetes_metallb_enabled: 'False' # Not used with opencontrail
+ metallb_addresses: 172.17.41.160-172.17.41.180
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.17.41.3
+ maas_deploy_cidr: 172.17.41.0/26
+ maas_deploy_gateway: 172.17.41.2
+ maas_deploy_range_end: 172.17.41.60
+ maas_deploy_range_start: 172.17.41.13
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # cz7784-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:6c:88:fe"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:88:fe"
+ mode: "static"
+ ip: "172.17.41.4"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.115"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm02: # #cz7785-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:34:53:8e"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:34:53:8e"
+ mode: "static"
+ ip: "172.17.41.5"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.121"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm03: # #cz7744-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:34:55:2c"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:34:55:2c"
+ mode: "static"
+ ip: "172.17.41.6"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.126"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ ctl01: # #cz7609.bud.mirantis.net
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a2:9c"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:54:a2:9c"
+ mode: "static"
+ ip: "172.17.41.9"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.239"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ ctl02: # #cz7631-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:56"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:54:a0:56"
+ mode: "static"
+ ip: "172.17.41.10"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.237"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ ctl03: # #cz7632-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:08"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:54:a0:08"
+ mode: "static"
+ ip: "172.17.41.11"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.236"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # #cz7781-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:58:06"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:58:06"
+ mode: "static"
+ ip: "172.17.41.7"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.120"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7674.bud.mirantis.net
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:33:27:22"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:33:27:22"
+ mode: "static"
+ ip: "172.17.41.8"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "176.74.217.166"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ kubernetes_control_address: 10.167.13.236
+ kubernetes_control_node01_deploy_address: 172.17.41.9
+ kubernetes_control_node01_address: 10.167.13.239
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_control_node02_deploy_address: 172.17.41.10
+ kubernetes_control_node02_address: 10.167.13.238
+ kubernetes_control_node02_hostname: ctl02
+ kubernetes_control_node03_deploy_address: 172.17.41.11
+ kubernetes_control_node03_address: 10.167.13.237
+ kubernetes_control_node03_hostname: ctl03
+ linux_repo_contrail_component: oc40
+ opencontrail_analytics_hostname: ctl
+ opencontrail_analytics_node01_hostname: ctl01
+ opencontrail_analytics_node02_hostname: ctl02
+ opencontrail_analytics_node03_hostname: ctl03
+ opencontrail_analytics_address: ${_param:opencontrail_control_address}
+ opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
+ opencontrail_analytics_node02_address: ${_param:opencontrail_control_node02_address}
+ opencontrail_analytics_node03_address: ${_param:opencontrail_control_node03_address}
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_compute_iface: ten2
+ opencontrail_control_address: 10.167.13.236
+ opencontrail_control_hostname: ctl
+ opencontrail_control_node01_address: 10.167.13.239
+ opencontrail_control_node01_hostname: ctl01
+ opencontrail_control_node02_address: 10.167.13.238
+ opencontrail_control_node02_hostname: ctl02
+ opencontrail_control_node03_address: 10.167.13.237
+ opencontrail_control_node03_hostname: ctl03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.13.100
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.13.101
+ opencontrail_router02_hostname: rtr02
+ opencontrail_public_ip_range: 172.17.41.128/26
+ opencontrail_version: '4.0'
+ openstack_enabled: 'False'
+ openssh_groups: ''
+ openstack_version: pike
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_smtp_use_tls: 'False'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_host: 127.0.0.1
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: kubernetes_enabled
+ public_host: ${_param:infra_config_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: frJMLJsfGkFXCk4qTTEttKla518Akvdp
+ salt_api_password_hash: $6$ixefPtcd$vasKuySO6L2JM0FKaB/udsQvH4upI2dWoJZaR9XTqeAx4UMvkHsNNSwsALVqTTLbXIngkFqYNXpbNm1o4iMGS.
+ salt_master_address: 10.167.13.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.17.41.3
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.13.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.13.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.13.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.13.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.13.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.13.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.13.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.13.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.13.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.13.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.13.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.13.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.13.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.13.0/24
+ tenant_vlan: '2410'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ vnf_onboarding_enabled: 'False'
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
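
Note: salt_api_password_hash above is a crypt SHA-512 ($6$...) hash of salt_api_password. A sketch of regenerating it, assuming OpenSSL 1.1.1+ (or mkpasswd from the whois package):

    openssl passwd -6 'frJMLJsfGkFXCk4qTTEttKla518Akvdp'
    # or: mkpasswd --method=sha-512 'frJMLJsfGkFXCk4qTTEttKla518Akvdp'
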
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..5ef4e1b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-environment.yaml
@@ -0,0 +1,132 @@
+nodes:
+ mon01.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid01.bm-k8s-contrail.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.bm-k8s-contrail.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.bm-k8s-contrail.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt.yaml
new file mode 100644
index 0000000..4fe952a
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt.yaml
@@ -0,0 +1,82 @@
+{% from 'cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait for salt-master to be ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Prepare maas
+ cmd: |
+ salt-call state.sls maas.cluster;
+ salt-call state.sls maas.region;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Generate a public key for machines in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model on the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+ # Check that the cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
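
Note: between the commissioning and deployment waits above, machine state can also be inspected directly through the MAAS CLI (the 'mirantis' profile is already authenticated for the sshkeys call earlier in this file):

    maas mirantis machines read | jq -r '.[] | "\(.hostname)\t\(.status_name)"'
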
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml
new file mode 100644
index 0000000..e40b708
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml
@@ -0,0 +1,129 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-k8s-contrail40-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.17.41.3') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-k8s-contrail40-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.17.41.0/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.17.41.2'
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -3]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.13.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.14.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.41.128/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..69fa20e
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/lab04-physical-inventory.yaml
@@ -0,0 +1,77 @@
+nodes:
+ cfg01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ kvm02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ kvm03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: single_vlan_ctl
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_dhcp
+ enp5s0f0:
+ role: bond0_ab_contrail
+ enp5s0f1:
+ role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..da08af4
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,437 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-cicd-bm-ocata-contrail-maas.local
+ cluster_name: cookied-cicd-bm-ocata-contrail-maas
+ opencontrail_version: 4.1
+ linux_repo_contrail_component: oc41
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ tenant_vlan: '2423'
+ backend_vlan: '2424'
+ storage_vlan: '2425' # not implemented yet, placeholder
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:6c:83:56"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:83:56"
+ mode: "static"
+ ip: "172.16.49.67"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.117"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:84:2c"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:84:2c"
+ mode: "static"
+ ip: "172.16.49.68"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.118"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:54"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:83:54"
+ mode: "static"
+ ip: "172.16.49.69"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.119"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd001: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:d4"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:d4"
+ mode: "static"
+ ip: "172.16.49.70"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.243"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd002: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:56"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:56"
+ mode: "static"
+ ip: "172.16.49.71"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.244"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd003: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:2a"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:2a"
+ mode: "static"
+ ip: "172.16.49.72"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.245"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # cz7345-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a2:5f"
+ interfaces:
+ enp2s0f1:
+ mac: "0c:c4:7a:54:a2:5f"
+ mode: "static"
+ ip: "172.16.49.73"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.233"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7346-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:51"
+ interfaces:
+ enp2s0f1:
+ mac: "0c:c4:7a:54:a0:51"
+ mode: "static"
+ ip: "172.16.49.74"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.232"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.220
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+ openldap_enabled: 'False'
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 10.167.8.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.220
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-cicd-bm-ocata-contrail-maas.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ ceph_enabled: 'True'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+
+ ceph_public_network_allocation: storage
+ ceph_public_network: "10.167.8.0/24"
+ ceph_cluster_network: "10.167.8.0/24"
+
+# for 2018.11.0+
+ ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
+ ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
+ ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
+ ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
+
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ #ceph_mon_node01_address: "172.16.47.66"
+ #ceph_mon_node01_deploy_address: "172.16.48.66"
+ ceph_mon_node01_address: "10.167.8.66"
+ ceph_mon_node01_hostname: "cmn01"
+ #ceph_mon_node02_address: "172.16.47.67"
+ #ceph_mon_node02_deploy_address: "172.16.48.67"
+ ceph_mon_node02_address: "10.167.8.67"
+ ceph_mon_node02_hostname: "cmn02"
+ #ceph_mon_node03_address: "172.16.47.68"
+ #ceph_mon_node03_deploy_address: "172.16.48.68"
+ ceph_mon_node03_address: "10.167.8.68"
+ ceph_mon_node03_hostname: "cmn03"
+ #ceph_rgw_address: "172.16.47.75"
+ ceph_rgw_address: "10.167.8.75"
+ #ceph_rgw_node01_address: "172.16.47.76"
+ #ceph_rgw_node01_deploy_address: "172.16.48.76"
+ ceph_rgw_node01_address: "10.167.8.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ #ceph_rgw_node02_address: "172.16.47.77"
+ #ceph_rgw_node02_deploy_address: "172.16.48.77"
+ ceph_rgw_node02_address: "10.167.8.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ #ceph_rgw_node03_address: "172.16.47.78"
+ #ceph_rgw_node03_deploy_address: "172.16.48.78"
+ ceph_rgw_node03_address: "10.167.8.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
\ No newline at end of file
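
The *_address_ranges values above assign exactly one IP per node of a rack. A minimal sketch (illustration only, not part of the patch) of how such a range string expands into the per-node addresses used for osd001..osd003:

    # Hypothetical helper: expand "A.B.C.X-A.B.C.Y" into individual addresses,
    # as the *_address_ranges values above imply for the three OSD nodes.
    import ipaddress

    def expand(range_str):
        start, end = (ipaddress.ip_address(a) for a in range_str.split("-"))
        return [str(ipaddress.ip_address(i))
                for i in range(int(start), int(end) + 1)]

    print(expand("10.167.8.200-10.167.8.202"))   # storage IPs, osd001..osd003
    print(expand("172.16.49.70-172.16.49.72"))   # deploy IPs for the same nodes
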
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..f150236
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-environment.yaml
@@ -0,0 +1,388 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.apt_mirantis.docker
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.cookied-cicd-bm-ocata-contrail-maas.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
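
The node stanzas above all share one shape: an FQDN key, a reclass storage name, a role list, and the same two-interface layout. A hedged sketch of generating such stanzas programmatically (a hypothetical helper, assuming PyYAML is available; the real template is maintained by hand):

    # Illustration only: generate the cid01..cid03 stanzas in the format above.
    import yaml  # PyYAML, assumed available

    DOMAIN = "cookied-cicd-bm-ocata-contrail-maas.local"

    def node(host, storage, extra_roles):
        return {f"{host}.{DOMAIN}": {
            "reclass_storage_name": storage,
            "roles": extra_roles + ["linux_system_codename_xenial"],
            "interfaces": {"ens2": {"role": "single_dhcp"},
                           "ens3": {"role": "single_ctl"}}}}

    nodes = {}
    for i, role in enumerate(["cicd_control_leader", "cicd_control_manager",
                              "cicd_control_manager"], start=1):
        nodes.update(node(f"cid{i:02d}", f"cicd_control_node{i:02d}", [role]))

    print(yaml.safe_dump({"nodes": nodes}, default_flow_style=False))
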
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt.yaml
new file mode 100644
index 0000000..23b0b54
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt.yaml
@@ -0,0 +1,74 @@
+{% from 'cookied-cicd-bm-ocata-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-bm-ocata-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-bm-ocata-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait for the salt-master to be ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public key for machines in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to be ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+ # Check that the cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
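
Each step above carries its own retry policy in retry: {count, delay}. A rough Python equivalent of the "Wait for machines to be ready" step (an assumption about the runner semantics; it reuses only the salt-call commands already shown above):

    # Poll MAAS machine states from cfg01: up to 7 attempts, 5 s apart,
    # each attempt giving wait_for_ready at most 120 s to converge.
    import subprocess, time

    for attempt in range(7):
        subprocess.run(["salt-call", "maas.machines_status"])   # show states
        done = subprocess.run(["timeout", "120", "salt-call", "state.sls",
                               "maas.machines.wait_for_ready"])
        if done.returncode == 0:
            break                                               # all Ready
        time.sleep(5)
    else:
        raise SystemExit("MAAS machines never reached the Ready state")
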
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/underlay.yaml
new file mode 100644
index 0000000..44a486d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/underlay.yaml
@@ -0,0 +1,129 @@
+# Set the repository suite, one of: 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-ocata-contrail-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-ocata-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.16.49.65'
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -3]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
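
The address pools above use fuel-devops relative offsets. My reading (an assumption, not stated in this patch) is that '+N' indexes from the network address and '-N' counts back from the broadcast address; a sketch resolving admin-pool01:

    # Assumed offset convention for fuel-devops address pools.
    import ipaddress

    pool = list(ipaddress.ip_network("172.16.49.64/26"))  # admin-pool01 net
    print(pool[1])            # '+1' (gateway-style offset) -> 172.16.49.65
    print(pool[61])           # l2_network_device: +61      -> 172.16.49.125
    print(pool[2], pool[-3])  # dhcp: [+2, -3]              -> .66 .. .125
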
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..01e14b1
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/lab04-physical-inventory.yaml
@@ -0,0 +1,78 @@
+nodes:
+ cfg01.cookied-cicd-bm-os-contrail32-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-os-contrail32-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: bond0_ab_ovs_vlan_ctl
+ ten1:
+ role: single_mgm_manual
+
+ kvm02.cookied-cicd-bm-os-contrail32-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: bond0_ab_ovs_vlan_ctl
+ ten1:
+ role: single_mgm_manual
+
+ kvm03.cookied-cicd-bm-os-contrail32-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: bond0_ab_ovs_vlan_ctl
+ ten1:
+ role: single_mgm_manual
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: single_vlan_ctl
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ #one1: unused
+ one2:
+ role: single_dhcp
+ ten1:
+ role: bond0_ab_contrail
+ ten2:
+ role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..9ec17de
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,451 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-cicd-bm-os-contrail32-maas-2018.8.0.local
+ cluster_name: cookied-cicd-bm-os-contrail32-maas-2018.8.0
+ opencontrail_version: 3.2
+ linux_repo_contrail_component: oc32
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ tenant_vlan: '2423'
+ backend_vlan: '2424'
+ storage_vlan: '2425' # not implemented yet, placeholder
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:6c:83:56"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:83:56"
+ mode: "static"
+ ip: "172.16.49.67"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.117"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:84:2c"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:84:2c"
+ mode: "static"
+ ip: "172.16.49.68"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.118"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:54"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:83:54"
+ mode: "static"
+ ip: "172.16.49.69"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.119"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd001: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:d4"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:55:6a:d4"
+ mode: "static"
+ ip: "172.16.49.70"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.243"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd002: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:57"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:55:6a:57"
+ mode: "static"
+ ip: "172.16.49.71"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.244"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd003: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:2a"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:55:6a:2a"
+ mode: "static"
+ ip: "172.16.49.72"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.245"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # cz7345-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a2:5f"
+ interfaces:
+ one2:
+ mac: "0c:c4:7a:54:a2:5f"
+ mode: "static"
+ ip: "172.16.49.73"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.233"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7346-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:51"
+ interfaces:
+ one2:
+ mac: "0c:c4:7a:54:a0:51"
+ mode: "static"
+ ip: "172.16.49.74"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.232"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
+ mcp_common_scripts_branch: ''
+ mcp_version: 2018.8.0
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.220
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+ openldap_enabled: 'False'
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 10.167.8.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.220
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-cicd-bm-os-contrail32-maas-2018.8.0.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ ceph_enabled: 'True'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+
+ ceph_public_network_allocation: storage
+ ceph_public_network: "10.167.8.0/24"
+ ceph_cluster_network: "10.167.8.0/24"
+
+ # For compatibility with 2018.8.0
+ ceph_osd_rack01_deploy_subnet: '172.16.49'
+ ceph_osd_rack01_single_subnet: '10.167.8'
+ ceph_osd_rack01_backend_subnet: '10.167.8'
+
+# for 2018.11.0+
+# ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
+# ceph_osd_storage_address_ranges: "10.167.47.200-10.167.47.202"
+# ceph_osd_backend_address_ranges: "10.167.49.200-10.167.49.202"
+
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ #ceph_mon_node01_address: "172.16.47.66"
+ #ceph_mon_node01_deploy_address: "172.16.48.66"
+ ceph_mon_node01_address: "10.167.8.66"
+ ceph_mon_node01_hostname: "cmn01"
+ #ceph_mon_node02_address: "172.16.47.67"
+ #ceph_mon_node02_deploy_address: "172.16.48.67"
+ ceph_mon_node02_address: "10.167.8.67"
+ ceph_mon_node02_hostname: "cmn02"
+ #ceph_mon_node03_address: "172.16.47.68"
+ #ceph_mon_node03_deploy_address: "172.16.48.68"
+ ceph_mon_node03_address: "10.167.8.68"
+ ceph_mon_node03_hostname: "cmn03"
+ #ceph_rgw_address: "172.16.47.75"
+ ceph_rgw_address: "10.167.8.75"
+ #ceph_rgw_node01_address: "172.16.47.76"
+ #ceph_rgw_node01_deploy_address: "172.16.48.76"
+ ceph_rgw_node01_address: "10.167.8.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ #ceph_rgw_node02_address: "172.16.47.77"
+ #ceph_rgw_node02_deploy_address: "172.16.48.77"
+ ceph_rgw_node02_address: "10.167.8.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ #ceph_rgw_node03_address: "172.16.47.78"
+ #ceph_rgw_node03_deploy_address: "172.16.48.78"
+ ceph_rgw_node03_address: "10.167.8.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-environment.yaml
new file mode 100644
index 0000000..2f5b431
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-environment.yaml
@@ -0,0 +1,395 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader_2018_8_0
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database_2018_8_0
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database_2018_8_0
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_dhcp
+ eth2:
+ role: single_ctl
+
+ nal02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_dhcp
+ eth2:
+ role: single_ctl
+
+ nal03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_dhcp
+ eth2:
+ role: single_ctl
+
+ ntw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_dhcp
+ eth2:
+ role: single_ctl
+
+ ntw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_dhcp
+ eth2:
+ role: single_ctl
+
+ ntw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_dhcp
+ eth2:
+ role: single_ctl
+
+ mtr01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
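Editor's note: the salt-context-environment mapping above (repeated for the other labs below) follows one shape throughout: every virtual control-plane node gets a reclass_storage_name, a list of roles, and a network role per interface. A minimal shape check is sketched here, assuming PyYAML; the helper and its name are hypothetical and not part of this change.

import yaml  # PyYAML, assumed available in the test environment

def validate_inventory(path):
    # Every node needs a reclass_storage_name, at least one role,
    # and a 'role' per listed interface.
    with open(path) as f:
        nodes = yaml.safe_load(f)['nodes']
    for name, node in nodes.items():
        assert 'reclass_storage_name' in node, name
        assert node.get('roles'), name
        for iface, cfg in (node.get('interfaces') or {}).items():
            assert 'role' in cfg, '%s:%s' % (name, iface)
    return len(nodes)

# validate_inventory('salt-context-environment.yaml')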
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt.yaml
new file mode 100644
index 0000000..b98e317
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt.yaml
@@ -0,0 +1,82 @@
+{% from 'cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait for salt-master to be ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
+
+- description: Prepare MAAS
+ cmd: |
+ salt-call state.sls maas.cluster;
+ salt-call state.sls maas.region;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Generate a public key for machines in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to become ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+ # Check that the cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
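Editor's note: each step in the salt.yaml above pairs a shell snippet with a retry contract (count attempts, delay seconds between them) and a skip_fail flag. The sketch below shows one plausible interpretation of that contract; it is an assumption about the runner's semantics, not the actual tcp-qa implementation.

import subprocess
import time

def run_step(cmd, count=1, delay=5, skip_fail=False):
    # Try the snippet up to 'count' times, sleeping 'delay' seconds
    # between attempts; raise unless skip_fail is set.
    for attempt in range(1, count + 1):
        if subprocess.run(cmd, shell=True).returncode == 0:
            return True
        if attempt < count:
            time.sleep(delay)
    if not skip_fail:
        raise RuntimeError('step failed after %d attempts: %s' % (count, cmd))
    return False

# Mirrors the 'Wait for machines to be deployed' step above:
# run_step('salt-call maas.machines_status && timeout 300 '
#          'salt-call state.sls maas.machines.wait_for_deployed',
#          count=6, delay=5)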
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml
new file mode 100644
index 0000000..0cf698d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml
@@ -0,0 +1,129 @@
+# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-os-contrail32-maas-2018.8.0') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-os-contrail32-maas-2018.8.0_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.16.49.65'
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -3]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
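Editor's note: the address_pools in the underlay above use relative offsets: by the fuel-devops convention (an assumption here), a positive offset counts forward from the network address and a negative one counts back from the broadcast address. The sketch below resolves the admin-pool01 entries with the stdlib ipaddress module.

import ipaddress

net = ipaddress.ip_network('172.16.49.64/26')  # default ADMIN_ADDRESS_POOL01

def resolve(offset, network=net):
    # Positive offsets from the network address, negative from the broadcast.
    if offset >= 0:
        return network.network_address + offset
    return network.broadcast_address + offset

print(resolve(61))   # l2_network_device: +61 -> 172.16.49.125
print(resolve(2))    # dhcp range start:  +2  -> 172.16.49.66
print(resolve(-3))   # dhcp range end:    -3  -> 172.16.49.124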
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..e1b92fb
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/lab04-physical-inventory.yaml
@@ -0,0 +1,78 @@
+nodes:
+ cfg01.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: bond0_ab_ovs_vlan_ctl
+ ten1:
+ role: single_mgm_manual
+
+ kvm02.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: bond0_ab_ovs_vlan_ctl
+ ten1:
+ role: single_mgm_manual
+
+ kvm03.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: bond0_ab_ovs_vlan_ctl
+ ten1:
+ role: single_mgm_manual
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_dhcp
+ one2:
+ role: single_vlan_ctl
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ #one1: unused
+ one2:
+ role: single_dhcp
+ ten1:
+ role: bond0_ab_contrail
+ ten2:
+ role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..29ad5cf
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,451 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-bm-4.0-contrail.local
+ cluster_name: cookied-bm-4.0-contrail
+ opencontrail_version: 4.0
+ linux_repo_contrail_component: oc40
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ tenant_vlan: '2423'
+ backend_vlan: '2424'
+ storage_vlan: '2425' # not implemented yet, placeholder
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:56"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:83:56"
+ mode: "static"
+ ip: "172.16.49.67"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.117"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    kvm02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:84:2c"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:84:2c"
+ mode: "static"
+ ip: "172.16.49.68"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.118"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    kvm03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:54"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:83:54"
+ mode: "static"
+ ip: "172.16.49.69"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.119"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd001: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:d4"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:55:6a:d4"
+ mode: "static"
+ ip: "172.16.49.70"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.243"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd002: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:57"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:55:6a:57"
+ mode: "static"
+ ip: "172.16.49.71"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.244"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd003: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:2a"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:55:6a:2a"
+ mode: "static"
+ ip: "172.16.49.72"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.245"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    cmp001: # cz7345-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a2:5f"
+ interfaces:
+ one2:
+ mac: "0c:c4:7a:54:a2:5f"
+ mode: "static"
+ ip: "172.16.49.73"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.233"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7346-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:51"
+ interfaces:
+ one2:
+ mac: "0c:c4:7a:54:a0:51"
+ mode: "static"
+ ip: "172.16.49.74"
+ subnet: "172.16.49.64/26" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.232"
+ #power_pass: ==IPMI_PASS==
+ power_password: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.220
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 10.167.8.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.220
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-bm-4.0-contrail.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ ceph_enabled: 'True'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+
+ ceph_public_network_allocation: storage
+ ceph_public_network: "10.167.8.0/24"
+ ceph_cluster_network: "10.167.8.0/24"
+
+ # For compatibility with 2018.8.0
+ ceph_osd_rack01_deploy_subnet: '172.16.49'
+ ceph_osd_rack01_single_subnet: '10.167.8'
+ ceph_osd_rack01_backend_subnet: '10.167.8'
+
+# for 2018.11.0+
+# ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
+# ceph_osd_storage_address_ranges: "10.167.47.200-10.167.47.202"
+# ceph_osd_backend_address_ranges: "10.167.49.200-10.167.49.202"
+
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ #ceph_mon_node01_address: "172.16.47.66"
+ #ceph_mon_node01_deploy_address: "172.16.48.66"
+ ceph_mon_node01_address: "10.167.8.66"
+ ceph_mon_node01_hostname: "cmn01"
+ #ceph_mon_node02_address: "172.16.47.67"
+ #ceph_mon_node02_deploy_address: "172.16.48.67"
+ ceph_mon_node02_address: "10.167.8.67"
+ ceph_mon_node02_hostname: "cmn02"
+ #ceph_mon_node03_address: "172.16.47.68"
+ #ceph_mon_node03_deploy_address: "172.16.48.68"
+ ceph_mon_node03_address: "10.167.8.68"
+ ceph_mon_node03_hostname: "cmn03"
+ #ceph_rgw_address: "172.16.47.75"
+ ceph_rgw_address: "10.167.8.75"
+ #ceph_rgw_node01_address: "172.16.47.76"
+ #ceph_rgw_node01_deploy_address: "172.16.48.76"
+ ceph_rgw_node01_address: "10.167.8.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ #ceph_rgw_node02_address: "172.16.47.77"
+ #ceph_rgw_node02_deploy_address: "172.16.48.77"
+ ceph_rgw_node02_address: "10.167.8.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ #ceph_rgw_node03_address: "172.16.47.78"
+ #ceph_rgw_node03_deploy_address: "172.16.48.78"
+ ceph_rgw_node03_address: "10.167.8.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
\ No newline at end of file
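Editor's note: in the default_context above, salt_api_password_hash is a SHA-512 crypt string (the $6$ prefix) for the plaintext in salt_api_password, while ==IPMI_USER== and ==IPMI_PASS== are placeholders substituted at deploy time. A minimal sketch of generating such a hash with the stdlib crypt module (Unix-only, and deprecated since Python 3.11 in favour of third-party libraries):

import crypt

password = 'BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH'  # salt_api_password above
# METHOD_SHA512 produces the same '$6$<salt>$<digest>' format as the context
print(crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512)))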
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml
new file mode 100644
index 0000000..025f4e7
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml
@@ -0,0 +1,395 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt.yaml
new file mode 100644
index 0000000..da93592
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt.yaml
@@ -0,0 +1,82 @@
+{% from 'cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait for salt-master to be ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
+
+- description: Prepare MAAS
+ cmd: |
+ salt-call state.sls maas.cluster;
+ salt-call state.sls maas.region;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Generate a public key for machines in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to become ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+ # Check that the cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml
new file mode 100644
index 0000000..ac301bd
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml
@@ -0,0 +1,129 @@
+# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-os-contrail40-maas-2018.8.0') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-os-contrail40-maas-2018.8.0_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.16.49.65'
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -3]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..27b5d25
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
@@ -0,0 +1,77 @@
+nodes:
+ cfg01.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ kvm02.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ kvm03.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: single_vlan_ctl
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_dhcp
+ enp5s0f0:
+ role: bond0_ab_contrail
+ enp5s0f1:
+ role: single_vlan_ctl
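Editor's note: the osd<<count>> and cmp<<count>> entries above are per-rack patterns rather than concrete hosts; the matching cookiecutter context supplies the count (openstack_compute_count: '2') and the padding flag (compute_padding_with_zeros: 'True'). The sketch below shows how such an expansion presumably works; the helper itself is hypothetical.

def expand(pattern, count, pad_with_zeros=True):
    # 'cmp<<count>>' with count=2 -> ['cmp001', 'cmp002'] when padded.
    width = 3 if pad_with_zeros else 0
    return [pattern.replace('<<count>>', str(i).zfill(width))
            for i in range(1, count + 1)]

print(expand('cmp<<count>>', 2))  # ['cmp001', 'cmp002']
print(expand('osd<<count>>', 3))  # ['osd001', 'osd002', 'osd003']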
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..75c3907
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,437 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-bm-4.0-contrail.local
+ cluster_name: cookied-bm-4.0-contrail
+ opencontrail_version: 4.1
+ linux_repo_contrail_component: oc41
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ tenant_vlan: '2423'
+ backend_vlan: '2424'
+ storage_vlan: '2425' # not implemented yet, placeholder
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:56"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:83:56"
+ mode: "static"
+ ip: "172.16.49.67"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.117"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    kvm02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:84:2c"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:84:2c"
+ mode: "static"
+ ip: "172.16.49.68"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.118"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    kvm03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:54"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:83:54"
+ mode: "static"
+ ip: "172.16.49.69"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.119"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd001: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:d4"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:d4"
+ mode: "static"
+ ip: "172.16.49.70"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.243"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd002: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:56"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:56"
+ mode: "static"
+ ip: "172.16.49.71"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.244"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd003: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:2a"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:2a"
+ mode: "static"
+ ip: "172.16.49.72"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.245"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    cmp001: # cz7345-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a2:5f"
+ interfaces:
+ enp2s0f1:
+ mac: "0c:c4:7a:54:a2:5f"
+ mode: "static"
+ ip: "172.16.49.73"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.233"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7346-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:51"
+ interfaces:
+ enp2s0f1:
+ mac: "0c:c4:7a:54:a0:51"
+ mode: "static"
+ ip: "172.16.49.74"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.232"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
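The ==IPMI_PASS== and ==IPMI_USER== markers above are deliberate placeholders so that real BMC credentials never land in the repository; the deployment tooling is expected to substitute them before the context is consumed. A minimal sketch of that substitution, assuming the credentials are exported as IPMI_USER and IPMI_PASS environment variables (the variable and file names here are illustrative, not taken from this change):

    # Inject the IPMI credentials into the rendered context before use
    sed -i "s/==IPMI_USER==/${IPMI_USER}/g; s/==IPMI_PASS==/${IPMI_PASS}/g" \
        salt-context-cookiecutter-contrail.yaml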
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.220
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+  # openldap_enabled is set in the OpenLDAP block below (duplicate YAML key dropped here)
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 10.167.8.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.220
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+  openldap_domain: cookied-cicd-bm-os-contrail40-maas.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ ceph_enabled: 'True'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+
+ ceph_public_network_allocation: storage
+ ceph_public_network: "10.167.8.0/24"
+ ceph_cluster_network: "10.167.8.0/24"
+
+# for 2018.11.0+
+ ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
+ ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
+ ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
+ ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
+
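With ceph_osd_node_count: 3, each of the three-address ranges above lines up one-to-one with the osd001..osd003 machines defined in maas_machines, presumably allocated in order: osd001 takes 172.16.49.70 on the deploy network, 10.167.8.200 for its single/storage addresses, and 10.167.10.200 on the backend network; osd002 takes the .71/.201 addresses, and osd003 the .72/.202 ones.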
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ #ceph_mon_node01_address: "172.16.47.66"
+ #ceph_mon_node01_deploy_address: "172.16.48.66"
+ ceph_mon_node01_address: "10.167.8.66"
+ ceph_mon_node01_hostname: "cmn01"
+ #ceph_mon_node02_address: "172.16.47.67"
+ #ceph_mon_node02_deploy_address: "172.16.48.67"
+ ceph_mon_node02_address: "10.167.8.67"
+ ceph_mon_node02_hostname: "cmn02"
+ #ceph_mon_node03_address: "172.16.47.68"
+ #ceph_mon_node03_deploy_address: "172.16.48.68"
+ ceph_mon_node03_address: "10.167.8.68"
+ ceph_mon_node03_hostname: "cmn03"
+ #ceph_rgw_address: "172.16.47.75"
+ ceph_rgw_address: "10.167.8.75"
+ #ceph_rgw_node01_address: "172.16.47.76"
+ #ceph_rgw_node01_deploy_address: "172.16.48.76"
+ ceph_rgw_node01_address: "10.167.8.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ #ceph_rgw_node02_address: "172.16.47.77"
+ #ceph_rgw_node02_deploy_address: "172.16.48.77"
+ ceph_rgw_node02_address: "10.167.8.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ #ceph_rgw_node03_address: "172.16.47.78"
+ #ceph_rgw_node03_deploy_address: "172.16.48.78"
+ ceph_rgw_node03_address: "10.167.8.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..5db9637
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-environment.yaml
@@ -0,0 +1,397 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.apt_mirantis.docker
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt.yaml
new file mode 100644
index 0000000..a7b3677
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt.yaml
@@ -0,0 +1,74 @@
+{% from 'cookied-cicd-bm-os-contrail40-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-bm-os-contrail40-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-bm-os-contrail40-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait until the salt-master is ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public SSH key and register it in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to become ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+    # Check that cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
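If one of the wait steps above exhausts its retries, the machine lifecycle states can be inspected directly with the MAAS CLI, using the same 'mirantis' profile that the SSH-key step is logged in with. A quick status check might look like this (the jq filter is illustrative):

    # Show the hostname and lifecycle status of every machine MAAS knows about
    maas mirantis machines read | jq -r '.[] | "\(.hostname)\t\(.status_name)"'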
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
new file mode 100644
index 0000000..0a5f4f2
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
@@ -0,0 +1,129 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-os-contrail40-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-os-contrail40-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.16.49.65'
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -3]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+ ip_ranges:
+ dhcp: [+2, -3]
+
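fuel-devops resolves these relative offsets against the pool's subnet: positive values count up from the network address, negative values count back from the broadcast address. For admin-pool01 (172.16.49.64/26) the entries above should therefore resolve roughly as follows (a sketch of the expected resolution, not output from the tool):

    gateway             172.16.49.65      (pinned explicitly)
    l2_network_device   +61       -> 172.16.49.125
    dhcp                [+2, -3]  -> 172.16.49.66 .. 172.16.49.124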
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..1952ac8
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/lab04-physical-inventory.yaml
@@ -0,0 +1,77 @@
+nodes:
+ cfg01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ kvm02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ kvm03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
+ role: single_mgm_manual
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: single_vlan_ctl
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_dhcp
+ enp5s0f0:
+ role: bond0_ab_contrail
+ enp5s0f1:
+ role: single_vlan_ctl
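The osd<<count>> and cmp<<count>> entries are templates rather than concrete hosts: the model generation is expected to expand them into numbered nodes (cmp001, cmp002, ...) according to openstack_compute_count, with compute_padding_with_zeros from the cookiecutter context presumably controlling the zero-padding of the suffix.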
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..9d00697
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,437 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-cicd-bm-queens-contrail-maas.local
+ cluster_name: cookied-cicd-bm-queens-contrail-maas
+ opencontrail_version: 4.1
+ linux_repo_contrail_component: oc41
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ tenant_vlan: '2423'
+ backend_vlan: '2424'
+ storage_vlan: '2425' # not implemented yet, placeholder
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:6c:83:56"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:83:56"
+ mode: "static"
+ ip: "172.16.49.67"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.117"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    kvm02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:84:2c"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:84:2c"
+ mode: "static"
+ ip: "172.16.49.68"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.118"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    kvm03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:83:54"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:83:54"
+ mode: "static"
+ ip: "172.16.49.69"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.119"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd001: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:d4"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:d4"
+ mode: "static"
+ ip: "172.16.49.70"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.243"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd002: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:56"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:56"
+ mode: "static"
+ ip: "172.16.49.71"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.244"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    osd003: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:2a"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:2a"
+ mode: "static"
+ ip: "172.16.49.72"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.245"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+    cmp001: # cz7345-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a2:5f"
+ interfaces:
+ enp2s0f1:
+ mac: "0c:c4:7a:54:a2:5f"
+ mode: "static"
+ ip: "172.16.49.73"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.233"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7346-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:54:a0:51"
+ interfaces:
+ enp2s0f1:
+ mac: "0c:c4:7a:54:a0:51"
+ mode: "static"
+ ip: "172.16.49.74"
+          subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually in the MAAS UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.232"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.220
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+  # openldap_enabled is set in the OpenLDAP block below (duplicate YAML key dropped here)
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 10.167.8.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.220
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-cicd-bm-queens-contrail-maas.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ ceph_enabled: 'True'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+
+ ceph_public_network_allocation: storage
+ ceph_public_network: "10.167.8.0/24"
+ ceph_cluster_network: "10.167.8.0/24"
+
+# for 2018.11.0+
+ ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
+ ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
+ ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
+ ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
+
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ #ceph_mon_node01_address: "172.16.47.66"
+ #ceph_mon_node01_deploy_address: "172.16.48.66"
+ ceph_mon_node01_address: "10.167.8.66"
+ ceph_mon_node01_hostname: "cmn01"
+ #ceph_mon_node02_address: "172.16.47.67"
+ #ceph_mon_node02_deploy_address: "172.16.48.67"
+ ceph_mon_node02_address: "10.167.8.67"
+ ceph_mon_node02_hostname: "cmn02"
+ #ceph_mon_node03_address: "172.16.47.68"
+ #ceph_mon_node03_deploy_address: "172.16.48.68"
+ ceph_mon_node03_address: "10.167.8.68"
+ ceph_mon_node03_hostname: "cmn03"
+ #ceph_rgw_address: "172.16.47.75"
+ ceph_rgw_address: "10.167.8.75"
+ #ceph_rgw_node01_address: "172.16.47.76"
+ #ceph_rgw_node01_deploy_address: "172.16.48.76"
+ ceph_rgw_node01_address: "10.167.8.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ #ceph_rgw_node02_address: "172.16.47.77"
+ #ceph_rgw_node02_deploy_address: "172.16.48.77"
+ ceph_rgw_node02_address: "10.167.8.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ #ceph_rgw_node03_address: "172.16.47.78"
+ #ceph_rgw_node03_deploy_address: "172.16.48.78"
+ ceph_rgw_node03_address: "10.167.8.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..de08d36
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-environment.yaml
@@ -0,0 +1,397 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.apt_mirantis.docker
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.cookied-cicd-bm-queens-contrail-maas.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt.yaml
new file mode 100644
index 0000000..77f10d2
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt.yaml
@@ -0,0 +1,74 @@
+{% from 'cookied-cicd-bm-queens-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-bm-queens-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-bm-queens-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait until the salt-master is ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public SSH key and register it in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to become ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+    # Check that cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
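The retry/timeout pairs bound how long each wait step can block: "Wait for machines to be deployed", for instance, retries up to 6 times with a 5 s delay around a 300 s timeout, so the step can take roughly 6 x (300 + 5) s, or about 30 minutes, in the worst case before the run fails.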
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/underlay.yaml
new file mode 100644
index 0000000..54b9f63
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/underlay.yaml
@@ -0,0 +1,129 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-queens-contrail-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-queens-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.16.49.65'
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -3]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+ ip_ranges:
+ dhcp: [+2, -3]
+
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
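
Every os_env() lookup in this underlay resolves from the job environment, with the second argument as the fallback, so a run can be retargeted without editing the template. An illustrative shell environment for the variables used above (interface and image values are placeholders, not defaults):

    # Illustrative overrides consumed by the underlay template above.
    export LAB_CONFIG_NAME="cookied-cicd-bm-queens-contrail-maas"
    export REPOSITORY_SUITE="proposed"
    export LAB_MANAGEMENT_IFACE="enp1s0f0"    # host NIC bridged to admin-pool01
    export LAB_CONTROL_IFACE="enp1s0f1"       # host NIC bridged to private-pool01
    export IMAGE_PATH_CFG01_DAY01="/images/cfg01-day01.qcow2"
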
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index 438696b..f288899 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -206,4 +206,6 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
- vnf_onboarding_enabled: 'False'
\ No newline at end of file
+ vnf_onboarding_enabled: 'False'
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
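
The two new runcmd entries are no-ops on images without an LVM volume group named vg0; on LVM images they grow the vg0 physical volume to the /dev/vda3 partition (extended by the growpart settings below) and let growlvm.py redistribute the space per /usr/share/growlvm/image-layout.yml. A rough sketch of that flow, assuming the growlvm.py helper ships in the image:

    # Sketch of the resize flow encoded above (assumes vg0 and /usr/bin/growlvm.py exist).
    if lvs vg0 >/dev/null 2>&1; then
        pvresize /dev/vda3    # grow the PV to the partition growpart just extended
        /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml
    fi
    lvs vg0                   # verify: root ~30%VG, var_log ~11%VG, tmp 5G, ...
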
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
index b1b6430..6451e34 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index 132a382..dbb578a 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -204,7 +204,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index b8bda7e..a531d95 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -172,4 +172,6 @@
tenant_network_subnet: 10.167.6.0/24
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
\ No newline at end of file
+ use_default_network_scheme: 'False'
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
index b1b6430..6451e34 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index 81a8afa..5b87397 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -197,7 +197,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index 7352614..c11154d 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -177,10 +177,10 @@
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
vnf_onboarding_enabled: 'False'
-
- kubernetes_network_calico_enabled: 'True'
kubernetes_network_flannel_enabled: 'True'
flannel_network: 10.20.0.0/16
kubernetes_network_genie_enabled: 'True'
kubernetes_genie_default_plugin: 'calico'
kubernetes_virtlet_enabled: 'True'
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml
index b1b6430..6451e34 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
index ee69506..1e1704a 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
@@ -183,7 +183,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
new file mode 100644
index 0000000..f2553b9
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -0,0 +1,571 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEogIBAAKCAQEAvDqzt/PHWvSSJrBtvD3UWp21CDrAajgOPeXrVm7VU+sDDpw0
+ YqDvVhYfT/N6ha+SWOK00KyTuMMbB8/I4tvsP9vvCXy7v2AJID/ZO2z/t8KfTDEJ
+ C75/6a0UBg6sl3i7+cUOHbeK+lkcfdnSI1H8Jzdhk4Xj7t7q+MIKTs5n+AlScgyz
+ NSiD2nG/U5LmaQ+rjZ1VsF9J0YTds0yLDF3hztVoDTs7j5obl7Xab3ZlwalgH5Gc
+ Id6BI09jkUbppdPzHpzV2oad7cFpVYTt9m3/MMT0amzPuwl/u/dI64rRuWPe60eT
+ qeVMQD0zP6o9F79upbzQStt82lPJcfF4CXvxYwIDAQABAoIBAAHUXDzUfMKQj/8a
+ RebHfxHmaIUM9SPTKahGXNQ5PY+UQDJbKFMxF0Jx8pn3VuCHxVdh1LoWg1UPaGra
+ BSzXUGOKgrdH5BdHGq+aj0T5mT6zAJNgAqN/lYSy7vfkGp9aSBF0bd+yEgK+7Pz4
+ Kge320iSTDt/2KhQuF30nN8JOI97m2uk2YHH8TixfVtmgLPEy+0Mw4VZLsHD4OY1
+ zu8xN6co2aQR0DB0MPKD6IxH62wSOJKBzF4o5xzzy/fl0ysDZbZ8Z/5Rejvp3yNT
+ 68B0X5CM27hVdYE+/tcKGl9WKmewIf3fTZUfBcwFIObMIl9fkK/519nwFed4AfOX
+ /a2LCBECgYEA9Lyl/eyzXuU2dgs6Gw/WMobqOVnHF9wbukS1XSKdwMogtpt7Pb23
+ +32r9xHgeRDvvWwSp8lNPZ8mu77dQ6akbOuOk5C6V3Mqt4zam3DBDMtL63nKq8tq
+ LQ0PVjj8cAgu3GSDCz7htqUb44rn5tX9zlM0vrwxzyYqbet7ZbsGoYsCgYEAxORQ
+ JFn1vwag8VBw3bngx3SJ46CpCC8Gz830W7pEaTS6zTTiDC4p5sATGya91JS8l47G
+ ikP2bcWzvT6aP/u+TZSqZiqp5Kn37fx8Et+ltIl47SH+PJHIR9F9r2f9zqla6mlP
+ zcX/mTSuAJCTP4whQA3/f1wNAeBnewhK9fXCOokCgYAz6TPYSXW+giXsIfOAO/q2
+ GvHhmk5lnDVxbBOAHtCRTQ5lTVN1xCEbQgvQy0TuyQ3hAuRuHH+6u2BO4Gw0Zkto
+ IwrJ+B/eXrpH1qOj5uW73f9Lgjjf+bSau7NuGYZKCSJPcy5smzjrMdhZimQoDWnZ
+ csK0VlzGUJUdXZ599I6ygwKBgGTf+LN3J7H0Snb4WKsw9Zoa+h6WjKO1vE6xXVW1
+ rCEes+o5Autsp2ki1WcexTlp7unTa6MhSNta5Ei8Dzli2FBVL6xihWKzNmRG7Kaa
+ 0QIbQMp1lRUhN7Sb/0HkDKRaHktlI07w95Bd7hw59kcjm1F/Gnz9A2kHuNzPFeDI
+ RffJAoGAdeCID5sb0oHEHTIxxB+cgfaiyaAe9qrW2INNWLVn5OTDh6cidatnWAor
+ M/SxwNoiYcCpi869q7wzjw5gNOVoNJbmwzDA7s+lgjTPQpq2jmO6RtweKbYoN5Zw
+ ++LiD3r07TD3p2QAyeooT29D/d6/2Hd6oyTJcZWIQTN+MTcXQO4=
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8OrO388da9JImsG28PdRanbUIOsBqOA495etWbtVT6wMOnDRioO9WFh9P83qFr5JY4rTQrJO4wxsHz8ji2+w/2+8JfLu/YAkgP9k7bP+3wp9MMQkLvn/prRQGDqyXeLv5xQ4dt4r6WRx92dIjUfwnN2GThePu3ur4wgpOzmf4CVJyDLM1KIPacb9TkuZpD6uNnVWwX0nRhN2zTIsMXeHO1WgNOzuPmhuXtdpvdmXBqWAfkZwh3oEjT2ORRuml0/MenNXahp3twWlVhO32bf8wxPRqbM+7CX+790jritG5Y97rR5Op5UxAPTM/qj0Xv26lvNBK23zaU8lx8XgJe/Fj
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.11.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.11.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.11.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.11.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAshiE2vK11KH1/PHO9v5IcT1ol3kuAorv6IgW+1paT9w4pFnd
+ H2DHQxTJsZ629cig+ELVAKHQnkND2U++/DM20ai5ZfpOwlvd+dL95/FbGb62Ozxx
+ kxBjyc/Bbbs8LcZtS1SN+agdkjQG1StpckUbFppoJ9nzWgnEcdYdonQ6aThgd+YL
+ rAOX04s3cMlCflClQl3lGFo24Qdhk/Y4M5rodfqfD5NOSKEhYP/dTMunri8zB5bU
+ ifvOvCWUKUOxLjkx95raY82xMHUobMYk87RcLPcq8pyz96/FPoiLqxM1oznTKNiI
+ 0bW0xjf7FFjfLCjTapKZPRz8+Wkvzmzj35LLrwIDAQABAoIBADJoGCo0Kdy93nay
+ JgboX66VV+YPaUNU+aQR6JdJsmgKB4oU2S4JYTyORKveJSCZoV3C5LCiG/6/QRPf
+ q0mMYUaj/51qZCJEiCYuXqjoOgWmYcOQTwD10ZiMEc4yAU1fbQ22J9zyhTQdP5XU
+ DKtH/eu+1h35ZRQl0ZD6rjaNuP6QekJM6IVCC7XBaCG5+wSER9R25HbbDhdb7CwH
+ W1GP9IgISqy9Z3f4PQOyCUmn/O99lN8kry6ui7bCywRfITV6C+pn02DpMgzKZ8jn
+ 3yts1f2mIbYVxnahtCaI3QTag6wBsnFq+U0uIXrUGMeeRzg9N1Ur01qdJpIR9g0v
+ Nt7QUZkCgYEA4lEavsFitSll/33JY4q82nYpjXAxTON3xraUqNYn5Cde06gNi8n1
+ t9TCLUqDhSpvgEOyJE/uwo5LAj79Ce2EwLkCttNggqRXBlY5ZpljwfWmxZtuGm/z
+ BJaOtkaK/1diR/+Qn/fTMyPH5JIXuQ6/XF60W4MSbzPgY4GO1BDx+G0CgYEAyXRT
+ 00GDdiXbxQmzeHTO9Bg5y36Y1FEWDLnc89bpHPTR4sT/XCczerevy/l8jsdzZlnu
+ 5ZddfWMF7EGNo51Zbmi0oLQ7nzigoVFcnhFHRFoCP36T9mvJk7O8Ao3ttpl/J2r0
+ mFiaKi0lhmZVbNpmliKjWAMZJyt6I7AfYekcOwsCgYEA0W8MuQptNgkhgtX80ElL
+ iz9eJK12chjuds3vtG66a8CjWGtkXcB/y6bwKsmR/GHQ3XnIGSJ/aTwU3fc8YzuS
+ ZmbPxDDIVx2OCycv52p7jrqtoqC7u2tuEQji+Hs/lhxfrxEp3V+R6vlpunQX0AF9
+ xRU/ApDBNndjZ7I1YrprseECgYA+zx8HgaiMIJeZokGrb7fKkPcMBCeAItveEeDa
+ wYmito3txv/a6nn5a+XKkbmNBpBrO+To/j1ux33kQDf56Cgm7UxLwoXISa6DPUvE
+ GJ0AqZOD2mIldUu+2k3m+ftAcDEdyBIEobNHLRZDBgriSmGrs5b77NNdzAdjsxjF
+ vRlJKwKBgD8DcP/C9pABC2mRQyH//RTk6XZfiDY0L18lwH7acEdHlJiF1PTwvIHD
+ cj1nMyG2MxEiSt1E5O/YQ4Lo3sognFIb8keu7IYxEgLXhvWFR3RwaYCjrF4ZGfD2
+ +83eUFPZQvEwTY/8OCogzJQfs1CT8+pLdO9tZQbrAaxfmF6c48KN
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
+ cluster_domain: cookied-cicd-ovs-maas.local
+ cluster_name: cookied-cicd-ovs-maas
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.11.0/24
+ control_vlan: '2404'
+ cookiecutter_template_branch: proposed
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.164.1
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.164.0/26
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: test@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.11.241
+ infra_kvm01_deploy_address: 172.16.164.11
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.11.242
+ infra_kvm02_deploy_address: 172.16.164.12
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.11.243
+ infra_kvm03_deploy_address: 172.16.164.13
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.11.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.164.2
+ maas_deploy_cidr: 172.16.164.0/26
+ maas_deploy_gateway: 172.16.164.1
+ maas_deploy_range_end: 172.16.164.55
+ maas_deploy_range_start: 172.16.164.15
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # #cz7050-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "00:25:90:e3:37:2c"
+ interfaces:
+ enp3s0f0:
+ mac: "00:25:90:e3:37:2c"
+ mode: "static"
+ ip: "172.16.164.11"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "176.74.222.96"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm02: # #cz7049-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "00:25:90:e3:3b:86"
+ interfaces:
+ enp3s0f0:
+ mac: "00:25:90:e3:3b:86"
+ mode: "static"
+ ip: "172.16.164.12"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "176.74.222.94"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm03: # #cz7048-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "00:25:90:e3:37:34"
+ interfaces:
+ enp3s0f0:
+ mac: "00:25:90:e3:37:34"
+ mode: "static"
+ ip: "172.16.164.13"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "176.74.222.92"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw01: # #cz7052-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "00:25:90:e3:37:12"
+ interfaces:
+ enp3s0f0:
+ mac: "00:25:90:e3:37:12"
+ mode: "static"
+ ip: "172.16.164.5"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "176.74.222.100"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw02: # #cz7051-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "00:25:90:e3:3b:a4"
+ interfaces:
+ enp3s0f0:
+ mac: "00:25:90:e3:3b:a4"
+ mode: "static"
+ ip: "172.16.164.6"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "176.74.222.98"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw03: # #cz7636-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:55:6a:28"
+ interfaces:
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:28"
+ mode: "static"
+ ip: "172.16.164.7"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.241"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd001: # #cz7922-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:9b:16"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:9b:16"
+ mode: "static"
+ ip: "172.16.164.8"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.227.32"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd002: # #cz7915-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6d:3e:06"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6d:3e:06"
+ mode: "static"
+ ip: "172.16.164.9"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.227.25"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd003: # #cz7921-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:9b:94"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:9b:94"
+ mode: "static"
+ ip: "172.16.164.10"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.227.31"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # #cz7913-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:9a:1a"
+ interfaces:
+ enp9s0f0:
+ mac: "0c:c4:7a:6c:9a:1a"
+ mode: "static"
+ ip: "172.16.164.3"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.227.23"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # #cz7916-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:89:4a"
+ interfaces:
+ enp9s0f9:
+ mac: "0c:c4:7a:6c:89:4a"
+ mode: "static"
+ ip: "172.16.164.4"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.227.26"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+# cmp001: # #cz7115-kvm.host-telecom.com
+# distro_series: "xenial"
+# # hwe_kernel: "hwe-16.04"
+# pxe_interface_mac: "00:25:90:e4:19:58"
+# interfaces:
+# enp9s0f0:
+# mac: "00:25:90:e4:19:58"
+# mode: "static"
+# ip: "172.16.164.3"
+# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+# gateway: ${_param:deploy_network_gateway}
+# power_parameters:
+# power_address: "185.8.58.66"
+# power_pass: ==IPMI_PASS==
+# power_type: ipmi
+# power_user: ==IPMI_USER==
+# cmp002: # #cz7116-kvm.host-telecom.com
+# distro_series: "xenial"
+# # hwe_kernel: "hwe-16.04"
+# pxe_interface_mac: "00:25:90:e4:28:6a"
+# interfaces:
+# enp9s0f9:
+# mac: "00:25:90:e4:28:6a"
+# mode: "static"
+# ip: "172.16.164.4"
+# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+# gateway: ${_param:deploy_network_gateway}
+# power_parameters:
+# power_address: "185.8.58.67"
+# power_pass: ==IPMI_PASS==
+# power_type: ipmi
+# power_user: ==IPMI_USER==
+
+ mcp_version: proposed
+ mcp_common_scripts_branch: ''
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: cookied-cicd-ovs-maas.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openstack_benchmark_node01_address: 10.167.11.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_single_address_ranges: 10.167.11.3-10.167.11.4
+ openstack_compute_deploy_address_ranges: 172.16.164.3-172.16.164.4
+ openstack_compute_tenant_address_ranges: 10.167.12.3-10.167.12.4
+ openstack_compute_backend_address_ranges: 10.167.12.3-10.167.12.4
+ openstack_control_address: 10.167.11.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.11.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.11.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.11.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.11.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.11.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.11.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.11.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_deploy_address: 172.16.164.5
+ openstack_gateway_node02_deploy_address: 172.16.164.6
+ openstack_gateway_node03_deploy_address: 172.16.164.7
+ openstack_gateway_node01_address: 10.167.11.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.12.5
+ openstack_gateway_node02_address: 10.167.11.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.12.6
+ openstack_gateway_node03_address: 10.167.11.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.12.7
+ openstack_message_queue_address: 10.167.11.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.11.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.11.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.11.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'True'
+ openstack_neutron_vlan_aware_vms: 'True'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_hugepages_count: '16'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nfv_sriov_network: physnet2
+ openstack_nfv_sriov_numvfs: '7'
+ openstack_nfv_sriov_pf_nic: enp5s0f1
+ openstack_nova_cpu_pinning: 6,7,8,9,10,11
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_ovs_encapsulation_vlan_range: 2402:2406
+ openstack_proxy_address: 10.167.11.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.11.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.11.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.11.19
+ openstack_version: pike
+ cinder_version: ${_param:openstack_version}
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
+ salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
+ salt_master_address: 10.167.11.2
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.164.2
+ shared_reclass_branch: proposed
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.11.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.11.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.11.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.11.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.11.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.11.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.11.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.11.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.11.96
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.11.97
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.11.98
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.11.99
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.12.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.12.0/24
+ tenant_vlan: '2406'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ sriov_network_subnet: 192.168.10.0/24
+ ceph_enabled: 'True'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+ ceph_public_network_allocation: storage
+ ceph_public_network: "10.167.11.0/24"
+ ceph_cluster_network: "10.167.11.0/24"
+ ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
+ ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+ ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
+ ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ ceph_mon_node01_address: "10.167.11.66"
+ ceph_mon_node01_hostname: "cmn01"
+ ceph_mon_node02_address: "10.167.11.67"
+ ceph_mon_node02_hostname: "cmn02"
+ ceph_mon_node03_address: "10.167.11.68"
+ ceph_mon_node03_hostname: "cmn03"
+ ceph_rgw_address: "10.167.11.75"
+ ceph_rgw_node01_address: "10.167.11.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ ceph_rgw_node02_address: "10.167.11.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ ceph_rgw_node03_address: "10.167.11.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: True
+ designate_backend: powerdns
+ designate_enabled: 'True'
+ openstack_dns_node01_address: 10.167.11.113
+ openstack_dns_node02_address: 10.167.11.114
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'True'
+ gnocchi_aggregation_storage: ceph
+ openstack_telemetry_address: 10.167.11.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.11.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.11.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.11.86
+ openstack_telemetry_node03_hostname: mdb03
+ barbican_backend: dogtag
+ barbican_enabled: 'True'
+ barbican_integration_enabled: 'False'
+ openstack_barbican_address: 10.167.11.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 10.167.11.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 10.167.11.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 10.167.11.47
+ openstack_barbican_node03_hostname: kmn03
+ openstack_create_public_network: 'True'
+ openstack_public_neutron_subnet_gateway: 172.17.42.1
+ openstack_public_neutron_subnet_cidr: 172.17.42.0/26
+ openstack_public_neutron_subnet_allocation_start: 172.17.42.15
+ openstack_public_neutron_subnet_allocation_end: 172.17.42.55
+ backend_vlan: '2402'
+ storage_vlan: '2405' # not implemented yet, placeholder
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ manila_enabled: 'False'
+ openscap_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+ openstack_octavia_enabled: 'True'
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
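
MAAS drives each machine's BMC through the power_parameters above during commissioning and deployment; ==IPMI_PASS== and ==IPMI_USER== are placeholders substituted at deploy time. A hypothetical pre-flight check of one BMC with ipmitool, useful before handing the machines to MAAS (credentials here are assumptions, not pipeline defaults):

    # Hypothetical BMC sanity check; replace the placeholders with real credentials.
    ipmitool -I lanplus -H 176.74.222.96 -U "$IPMI_USER" -P "$IPMI_PASS" chassis power status
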
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..aa0d838
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
@@ -0,0 +1,108 @@
+nodes:
+ cfg01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+
+ # Physical nodes
+ kvm01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm_dhcp
+ enp3s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm_dhcp
+ enp3s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm_dhcp
+ enp3s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ enp9s0f1:
+ role: single_vlan_ctl
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm_dhcp
+ enp9s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+ gtw01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm_dhcp
+ enp3s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_external
+ external_address: 172.17.42.5
+ external_network_netmask: 255.255.255.192
+
+ gtw02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm_dhcp
+ enp3s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_external
+ external_address: 172.17.42.6
+ external_network_netmask: 255.255.255.192
+
+ gtw03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node03
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm_dhcp
+ enp2s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_external
+ external_address: 172.17.42.7
+ external_network_netmask: 255.255.255.192
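
The osd<<count>> and cmp<<count>> entries are rack templates rather than concrete hosts: the model tooling expands <<count>> into per-node definitions using openstack_compute_count, compute_padding_with_zeros and the *_address_ranges parameters from the cookiecutter context. A toy sketch of the assumed expansion (illustrative only; the real expansion happens in the cookiecutter/reclass tooling):

    # Toy sketch: expand cmp<<count>> into zero-padded node names.
    count=2                                   # openstack_compute_count
    for i in $(seq -f "%03g" 1 "$count"); do  # %03g yields 001, 002 (padding with zeros)
        echo "cmp${i}.cookied-cicd-ovs-maas.local -> openstack_compute_rack01"
    done
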
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-vcp-environment.yaml
new file mode 100644
index 0000000..e9461ca
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-vcp-environment.yaml
@@ -0,0 +1,407 @@
+nodes:
+ ctl01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mdb01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mdb02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mdb03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dns01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dns02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ kmn01.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ kmn02.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ kmn03.cookied-cicd-ovs-maas.local:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt.yaml
new file mode 100644
index 0000000..021401f
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/salt.yaml
@@ -0,0 +1,72 @@
+{% from 'cookied-cicd-ovs-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-ovs-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-ovs-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait for the salt-master to be ready after the configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public key and register it in MAAS
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of the BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for the machines to become ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+ # Check that the cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for the machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
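Taken together, the steps in this salt.yaml drive the whole MAAS bare-metal flow: publish an SSH key, commission the machines, wait for them to become Ready, pin the interface IPs from the model, then deploy and wait again. A minimal standalone sketch of the same sequence, run on cfg01 and assuming only the salt-call MAAS modules and states already used above:

    #!/bin/bash
    set -ex
    # Publish root's public key so the deployed nodes are reachable over SSH
    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
    # Commission the bare-metal machines and wait until MAAS reports them Ready
    salt-call maas.process_machines
    salt-call maas.machines_status
    timeout 120 salt-call state.sls maas.machines.wait_for_ready
    # Enforce the interface/IP layout defined in the reclass model
    salt-call state.sls maas.machines.assign_ip
    # Deploy the OS and wait until the machines reach Deployed
    salt-call maas.deploy_machines
    timeout 300 salt-call state.sls maas.machines.wait_for_deployed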
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/underlay.yaml
new file mode 100644
index 0000000..b4a84ea
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/underlay.yaml
@@ -0,0 +1,127 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-ovs-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-ovs-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ ip_ranges:
+ dhcp: [+2, -4]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -4]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.12.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -4]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
+ params:
+ ip_reserved:
+ gateway: '172.17.42.1'
+ ip_ranges:
+ dhcp: ['172.17.42.10', '172.17.42.20']
+
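The pools above use fuel-devops relative offsets: a positive value counts forward from the network address, and a negative value is assumed here to count back from the last address of the subnet. A small bash sketch of how the admin-pool01 entries resolve for 172.16.164.0/26 (illustrative only, under that assumption):

    ip2int() { local IFS=.; read -r a b c d <<< "$1"; echo $(( (a<<24)|(b<<16)|(c<<8)|d )); }
    int2ip() { echo "$(( ($1>>24)&255 )).$(( ($1>>16)&255 )).$(( ($1>>8)&255 )).$(( $1&255 ))"; }
    net=$(ip2int 172.16.164.0); size=$(( 1 << (32 - 26) ))       # /26 -> 64 addresses
    echo "gateway    (+1): $(int2ip $(( net + 1 )))"             # 172.16.164.1
    echo "dhcp start (+2): $(int2ip $(( net + 2 )))"             # 172.16.164.2
    echo "dhcp end   (-4): $(int2ip $(( net + size - 1 - 4 )))"  # 172.16.164.59 under this assumption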
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ # maas management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_MANAGEMENT_IFACE
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env LAB_CONTROL_IFACE
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+      source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # e.g. http://images.mirantis.com/cfg01-day01.qcow2 (no fallback: the variable must be set)
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
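Unlike most virtual labs in this repo, this underlay bridges the admin and control networks to physical host interfaces and takes the day01 image path with no fallback, so the referenced environment variables must be set before the template is rendered. A small preflight sketch (the variable names are taken from the template above):

    #!/bin/bash
    set -eu
    # Fail fast if a required variable is unset or empty
    for var in LAB_MANAGEMENT_IFACE LAB_CONTROL_IFACE IMAGE_PATH_CFG01_DAY01; do
        [ -n "${!var:-}" ] || { echo "ERROR: $var is not set" >&2; exit 1; }
    done
    # The bridged interfaces must exist on the hypervisor host
    ip link show "$LAB_MANAGEMENT_IFACE" > /dev/null
    ip link show "$LAB_CONTROL_IFACE" > /dev/null
    # The day01 image must be readable
    [ -r "$IMAGE_PATH_CFG01_DAY01" ] || { echo "ERROR: day01 image not found" >&2; exit 1; }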
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
index 53c9687..2016eed 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -204,3 +204,10 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
index bef76b3..26f7983 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
@@ -4,6 +4,7 @@
roles:
- infra_config
- linux_system_codename_xenial
+ - features_runtest
interfaces:
ens3:
role: single_dhcp
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
index ae2e235..64abc07 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
@@ -18,17 +18,6 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
- cmd: |
- set -ex
- git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
- cd /root/salt-formula-linux
- git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
- cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
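The two guarded runcmd lines only act on images that actually carry the vg0 volume group: plain cloud images skip them, while LVM-based images first grow the physical volume and then let growlvm.py resize the logical volumes according to the layout file written above. A rough sketch of the manual equivalent for a single volume (an assumption about what growlvm.py does internally, not its actual code):

    # Grow the PV to fill the partition that growpart expanded
    pvresize /dev/vda3
    # Resize one LV to its share of the VG and grow the filesystem;
    # growlvm.py is assumed to repeat this for every entry in image-layout.yml
    lvextend -l 30%VG /dev/vg0/root
    resize2fs /dev/vg0/root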
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
index c114631..ba916b3 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
@@ -215,7 +215,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
similarity index 63%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 22e4442..30671a1 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -68,9 +68,46 @@
ceph_rgw_node03_address: 10.167.4.78
ceph_rgw_node03_hostname: rgw03
ceph_version: luminous
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-dvr-ceph.local
- cluster_name: cookied-mcp-pike-dvr-ceph
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: cookied-cicd-pike-dvr-ceph.local
+ cluster_name: cookied-cicd-pike-dvr-ceph
compute_bond_mode: active-backup
compute_padding_with_zeros: 'False'
compute_primary_first_nic: eth1
@@ -88,7 +125,7 @@
deployment_type: physical
dns_server01: 172.18.176.6
dns_server02: 172.18.208.44
- email_address: obutenko@mirantis.com
+ email_address: test@mirantis.com
gainsight_service_enabled: 'False'
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -122,20 +159,22 @@
mcp_common_scripts_branch: 'proposed'
mcp_version: proposed
no_platform: 'False'
- nova_vnc_tls_enabled: 'True'
offline_deployment: 'False'
opencontrail_enabled: 'False'
- openssh_groups: ''
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
openstack_benchmark_node01_address: 10.167.4.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
openstack_control_address: 10.167.4.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.11
@@ -181,11 +220,12 @@
openstack_nova_compute_reserved_host_memory_mb: '900'
openstack_ovs_dvr_enabled: 'True'
openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.4.80
+ openstack_proxy_address: 172.17.16.80 # external network endpoint
+ openstack_proxy_vip_interface: ens5
openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.81
+ openstack_proxy_node01_address: 10.167.4.121
openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.82
+ openstack_proxy_node02_address: 10.167.4.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 10.167.4.19
openstack_version: pike
@@ -203,23 +243,62 @@
shared_reclass_branch: 'proposed'
shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
sriov_network_subnet: 10.55.0.0/16
- stacklight_enabled: 'False'
- stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.167.6.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.167.6.0/24
- tenant_telemetry_enabled: 'True'
+ tenant_telemetry_enabled: 'False'
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
version: proposed
vnf_onboarding_enabled: 'False'
- openstack_telemetry_address: 172.16.10.83
+ openstack_telemetry_address: 10.167.4.83
openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 172.16.10.84
+ openstack_telemetry_node01_address: 10.167.4.84
openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 172.16.10.85
+ openstack_telemetry_node02_address: 10.167.4.85
openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 172.16.10.86
- openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
+ openstack_telemetry_node03_address: 10.167.4.86
+ openstack_telemetry_node03_hostname: mdb03
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.50
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.51
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.52
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.53
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ openstack_create_public_network: 'True'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
similarity index 63%
rename from tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
index 89bf918..7221980 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
@@ -1,27 +1,58 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01:
reclass_storage_name: infra_config_node01
roles:
- infra_config
- linux_system_codename_xenial
+ - features_runtest
+ classes:
+ - environment.cookied-cicd-pike-dvr-ceph.override_ntp_virtual
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -29,17 +60,13 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -47,16 +74,13 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -64,63 +88,25 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01:
reclass_storage_name: openstack_proxy_node01
roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
+ ens5:
+ role: single_external
+ external_address: 172.17.16.121
+ external_network_netmask: 255.255.255.0
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
- - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -132,11 +118,13 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.apt_mirantis.docker
interfaces:
ens3:
role: single_dhcp
@@ -147,7 +135,7 @@
ens6:
role: bond1_ab_ovs_floating
- osd<<count>>.mcp11-ovs-dpdk.local:
+ osd<<count>>:
reclass_storage_name: ceph_osd_rack01
roles:
- ceph_osd
@@ -158,7 +146,7 @@
ens4:
role: single_ctl
- cmn01.mcp11-ovs-dpdk.local:
+ cmn01:
reclass_storage_name: ceph_mon_node01
roles:
- ceph_mon
@@ -169,7 +157,7 @@
ens4:
role: single_ctl
- cmn02.mcp11-ovs-dpdk.local:
+ cmn02:
reclass_storage_name: ceph_mon_node02
roles:
- ceph_mon
@@ -180,7 +168,7 @@
ens4:
role: single_ctl
- cmn03.mcp11-ovs-dpdk.local:
+ cmn03:
reclass_storage_name: ceph_mon_node03
roles:
- ceph_mon
@@ -191,7 +179,7 @@
ens4:
role: single_ctl
- rgw01.mcp11-ovs-dpdk.local:
+ rgw01:
reclass_storage_name: ceph_rgw_node01
roles:
- ceph_rgw
@@ -202,7 +190,7 @@
ens4:
role: single_ctl
- rgw02.mcp11-ovs-dpdk.local:
+ rgw02:
reclass_storage_name: ceph_rgw_node02
roles:
- ceph_rgw
@@ -213,7 +201,7 @@
ens4:
role: single_ctl
- rgw03.cmcp11-ovs-dpdk.local:
+ rgw03:
reclass_storage_name: ceph_rgw_node03
roles:
- ceph_rgw
@@ -222,4 +210,103 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
\ No newline at end of file
+ role: single_ctl
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..4b9c68c
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml
@@ -0,0 +1,14 @@
+{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
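This lab boots cfg01 from a pre-built day01 image, so its salt.yaml only installs the minions and verifies versions via the shared macros. A rough sketch of the kind of check those macros perform (an assumption for illustration; the real code lives in shared-salt.yaml):

    # Compare every minion's salt version against the master's
    master_ver=$(salt-call --out=txt test.version | awk '{print $2}')
    salt '*' test.version --out=txt | while read -r minion ver; do
        [ "$ver" = "$master_ver" ] || echo "WARNING: ${minion%:} runs $ver (master: $master_ver)"
    done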
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
similarity index 72%
rename from tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
similarity index 71%
rename from tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
index 502997f..c53173b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
@@ -1,62 +1,48 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr-vxlan') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
+{% import 'cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dvr-ceph') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -66,54 +52,36 @@
gateway: +1
l2_network_device: +1
default_{{ HOSTNAME_CFG01 }}: +15
-
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
default_{{ HOSTNAME_CID }}: +90
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_MON01 }}: +71
default_{{ HOSTNAME_MON02 }}: +72
default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+10, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -122,55 +90,36 @@
gateway: +1
l2_network_device: +1
default_{{ HOSTNAME_CFG01 }}: +15
-
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
default_{{ HOSTNAME_CID }}: +90
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_MON01 }}: +71
default_{{ HOSTNAME_MON02 }}: +72
default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
default_{{ HOSTNAME_LOG03 }}: +63
-
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+70, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -179,52 +128,30 @@
gateway: +1
l2_network_device: +1
default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
default_{{ HOSTNAME_CTL01 }}: +11
default_{{ HOSTNAME_CTL02 }}: +12
default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_MON01 }}: +71
default_{{ HOSTNAME_MON02 }}: +72
default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
dhcp: [+10, -10]
@@ -235,55 +162,32 @@
gateway: +1
l2_network_device: +1
default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
default_{{ HOSTNAME_CTL01 }}: +11
default_{{ HOSTNAME_CTL02 }}: +12
default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_MON01 }}: +71
default_{{ HOSTNAME_MON02 }}: +72
default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+130, +230]
groups:
- name: default
@@ -311,18 +215,15 @@
forward:
mode: nat
- private_br:
- vlan_ifaces:
- - 10
-
private:
address_pool: private-pool01
dhcp: false
forward:
mode: route
- parent_iface:
- l2_net_dev: private_br
- tag: 10
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
external:
address_pool: external-pool01
@@ -330,51 +231,42 @@
forward:
mode: route
-
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
format: qcow2
- name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+      source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # e.g. http://images.mirantis.com/cfg01-day01.qcow2 (no fallback: the variable must be set)
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
- name: {{ HOSTNAME_CFG01 }}
role: salt_master
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
backing_store: cfg01_day01_image
format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
+ - name: config
capacity: 1
format: raw
device: cdrom
bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
interfaces:
- label: ens3
l2_network_device: admin
interface_model: *interface_model
- label: ens4
- l2_network_device: private_br
+ l2_network_device: private
interface_model: *interface_model
network_config:
ens3:
@@ -384,11 +276,11 @@
networks:
- private
- - name: {{ HOSTNAME_CID01 }}
+ - name: {{ HOSTNAME_CTL01 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -412,7 +304,7 @@
l2_network_device: admin
interface_model: *interface_model
- label: ens4
- l2_network_device: private_br
+ l2_network_device: private
interface_model: *interface_model
network_config: &network_config
ens3:
@@ -422,322 +314,6 @@
networks:
- private
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- # KVM* nodes required for services like glusterfs.server
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
params:
@@ -752,9 +328,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -781,7 +354,114 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMN01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMN02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMN03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_OSD01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: ceph_osd
+ capacity: 50
+ format: qcow2
+ - name: ceph_journal
capacity: 50
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -796,11 +476,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_OSD02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -808,7 +488,13 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: ceph_osd
+ capacity: 50
+ format: qcow2
+ - name: ceph_journal
+ capacity: 50
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -822,11 +508,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_RGW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -834,7 +520,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -848,11 +534,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON03 }}
+ - name: {{ HOSTNAME_RGW02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -860,7 +546,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -873,12 +559,11 @@
interfaces: *interfaces
network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
+ - name: {{ HOSTNAME_RGW03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -886,137 +571,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -1044,9 +599,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -1056,14 +608,32 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 3072
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -1080,7 +650,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
+ cloudinit_user_data: *cloudinit_user_data_1604
interfaces: &all_interfaces
@@ -1088,10 +658,10 @@
l2_network_device: admin
interface_model: *interface_model
- label: ens4
- l2_network_device: private_br
+ l2_network_device: private
interface_model: *interface_model
- label: ens5
- l2_network_device: private_br
+ l2_network_device: tenant
interface_model: *interface_model
- label: ens6
l2_network_device: external
@@ -1105,7 +675,7 @@
- private
ens5:
networks:
- - private
+ - tenant
ens6:
networks:
- external
@@ -1114,7 +684,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 3072
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -1131,7 +701,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
+ cloudinit_user_data: *cloudinit_user_data_1604
interfaces: *all_interfaces
network_config: *all_network_config
@@ -1139,7 +709,7 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 4
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -1157,7 +727,319 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
+ cloudinit_user_data: *cloudinit_user_data_1604
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
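Every node entry in the underlay template above repeats one shape, differing only in hostname, sizing, backing image and, for the cmp/gtw nodes, the interface set. Two environment-lookup forms are mixed in the file: the fuel-devops "!os_env VAR, default" YAML tag, resolved when fuel-devops parses the rendered template, and the Jinja "{{ os_env('VAR', default) }}" call, resolved earlier when the template itself is rendered. A condensed sketch of the pattern; the hostname variable and values are illustrative:

    - name: {{ HOSTNAME_MON01 }}                  # substituted by Jinja before fuel-devops sees the YAML
      role: salt_minion
      params:
        vcpu: !os_env SLAVE_NODE_CPU, 2           # devops YAML tag: env var with a default
        memory: {{ os_env('SLAVE_NODE_MEMORY', 2048) }}   # Jinja form, same effect at render time
        boot:
          - hd
        cloud_init_volume_name: iso               # cloud-init data is attached through the 'iso' volume
        cloud_init_iface_up: ens3
        volumes:
          - name: system
            capacity: !os_env NODE_VOLUME_SIZE, 150
            backing_store: mcp_ubuntu_1604_image  # cloudimage1604 or mcp_ubuntu_1604_image above
            format: qcow2
          - name: iso                             # holds the generated config-drive image
            capacity: 1
            format: raw
            device: cdrom
            bus: ide
            cloudinit_meta_data: *cloudinit_meta_data
            cloudinit_user_data: *cloudinit_user_data_1604
        interfaces: *interfaces                   # YAML anchors defined once per template
        network_config: *network_config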
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
index b0c69e8..8fc7181 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -232,3 +232,10 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
\ No newline at end of file
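For orientation: the public-network keys added here (and to the ovs-sl and queens contexts further down) describe a single /24 with gateway 172.17.16.1 and an allocation pool of 172.17.16.201-172.17.16.245, i.e. 245 - 201 + 1 = 45 floating addresses. Cookiecutter booleans are quoted strings, so openstack_create_public_network: 'False' records the subnet parameters without creating the network in this Pike context, while the queens-dvr-sl context below sets it to 'True'.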
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
index 77d8229..065d10f 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
@@ -4,6 +4,7 @@
roles:
- infra_config
- linux_system_codename_xenial
+ - features_runtest
classes:
- environment.cookied-cicd-pike-dvr-sl.override_ntp_virtual
interfaces:
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
index 12e013c..a38f2f3 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
@@ -9,17 +9,6 @@
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
- cmd: |
- set -ex
- git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
- cd /root/salt-formula-linux
- git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
- cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
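The two guarded runcmd entries and the image-layout.yml written via write_files form one boot-time grow sequence: cloud-init's growpart module (configured at the end of the added block) first grows /dev/vda3 to fill the disk, pvresize then extends the LVM physical volume, and growlvm.py redistributes the space between logical volumes per the layout file (30%VG for root, 11%VG each for /var/log and /var/tmp, fixed sizes for the rest). The "if lvs vg0" guard skips both steps on images without the vg0 volume group, so one user-data file serves plain and LVM-based images. A minimal standalone cloud-config sketch of the same sequence, assuming /usr/bin/growlvm.py ships with the image as these templates imply:

    #cloud-config
    growpart:
      mode: auto                        # grow the last partition (/dev/vda3) during early boot
      devices: ['/', '/dev/vda3']
      ignore_growroot_disabled: false
    runcmd:
      # Both steps return without effect when the image has no 'vg0' volume group
      - if lvs vg0; then pvresize /dev/vda3; fi
      - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi

The identical block is added to the ovs-sl and queens user-data templates below.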
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
index 2cbbce6..256081f 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -220,7 +220,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index a5a862b..d459d32 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -262,8 +262,18 @@
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
openstack_octavia_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
octavia_hm_bind_ip: 192.168.1.12
octavia_lb_mgmt_cidr: 192.168.1.0/24
octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
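The octavia additions switch the health manager from a single instance to a three-node cluster: octavia_manager_cluster: 'True' plus one management address per node (192.168.1.10-12), all of which fall inside the pre-existing octavia_lb_mgmt_cidr 192.168.1.0/24 and its allocation pool (192.168.1.2-192.168.1.200); the retained octavia_hm_bind_ip 192.168.1.12 coincides with node03's address. The same cluster block is added to the queens-dvr-sl context below.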
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
index 6ffc515..be99dbb 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
@@ -4,6 +4,7 @@
roles:
- infra_config
- linux_system_codename_xenial
+ - features_runtest
interfaces:
ens3:
role: single_dhcp
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
index 33440ad..4905e32 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
@@ -9,17 +9,6 @@
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
- cmd: |
- set -ex
- git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
- cd /root/salt-formula-linux
- git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
- cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
index 319c007..5a4fc79 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
@@ -17,6 +17,7 @@
root:r00tme
expire: False
+
bootcmd:
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
@@ -25,6 +26,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +61,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
index a7b966c..0fc0d86 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
@@ -221,7 +221,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
index 0fa8367..c3c3ec9 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
@@ -63,16 +63,16 @@
gateway_primary_second_nic: eth2
infra_bond_mode: active-backup
infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.91
+ infra_kvm01_control_address: 10.167.4.101
+ infra_kvm01_deploy_address: 10.167.5.101
infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.92
+ infra_kvm02_control_address: 10.167.4.102
+ infra_kvm02_deploy_address: 10.167.5.102
infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.93
+ infra_kvm03_control_address: 10.167.4.103
+ infra_kvm03_deploy_address: 10.167.5.103
infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
+ infra_kvm_vip_address: 10.167.4.100
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
kubernetes_enabled: 'False'
@@ -262,9 +262,19 @@
-----END RSA PRIVATE KEY-----
octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
openstack_octavia_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
octavia_hm_bind_ip: 192.168.1.12
octavia_lb_mgmt_cidr: 192.168.1.0/24
octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-
+ openstack_create_public_network: 'True'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
index 59e20b9..97e8520 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
@@ -4,6 +4,7 @@
roles:
- infra_config
- linux_system_codename_xenial
+ - features_runtest
classes:
- environment.cookied-cicd-queens-dvr-sl.override_ntp_virtual
interfaces:
@@ -12,39 +13,6 @@
ens4:
role: single_static_ctl
- kvm01:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kvm02:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kvm03:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
cid01:
reclass_storage_name: cicd_control_node01
roles:
@@ -81,10 +49,10 @@
ctl01:
reclass_storage_name: openstack_control_node01
roles:
+ - infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -95,10 +63,10 @@
ctl02:
reclass_storage_name: openstack_control_node02
roles:
+ - infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -109,10 +77,10 @@
ctl03:
reclass_storage_name: openstack_control_node03
roles:
+ - infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
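In these reclass environment contexts each entry maps a hostname to a reclass_storage_name, a list of roles and per-interface roles, and deployment steps target nodes through those roles. Dropping the dedicated kvm01-kvm03 entries while adding infra_kvm to ctl01-ctl03 therefore keeps the infra_kvm states applied but removes the three foundation VMs entirely (their HOSTNAME_KVM* definitions and address-map entries go away in underlay.yaml below); accordingly, the cookiecutter context above renumbers the kvm control addresses from 10.167.4.241-243 to 10.167.4.101-103 and the VIP from .240 to .100. A condensed sketch of the merged controller entry, with the interface roles assumed to match the removed kvm entries:

    ctl01:
      reclass_storage_name: openstack_control_node01
      roles:
        - infra_kvm                      # taken over from the removed kvm01 node
        - openstack_control_leader
        - openstack_database_leader
        - openstack_message_queue
        - linux_system_codename_xenial
      interfaces:
        ens3:
          role: single_dhcp              # assumed, as in the removed kvm entries
        ens4:
          role: single_ctl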
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
index 62a8a23..b4c01a8 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
@@ -9,17 +9,6 @@
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
- cmd: |
- set -ex
- git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
- cd /root/salt-formula-linux
- git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
- cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4c43578..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo touch /is_cloud_init_started
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
-
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - echo "******** MOUNT CONFIG DRIVE"
- # Mount config drive
- - mkdir /root/config-drive
- - mount /dev/sr0 /root/config-drive
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- #- sudo ifdown ens3
- #- sudo ip r d default || true # remove existing default route to get it from dhcp
- #- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- # Run user data script from config drive
- - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
- - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
- - rm -f /etc/network/interfaces
- #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- #- cp /root/config-drive/user-data /root/user-data
- #- sed -i '/^reboot$/d' /root/user-data
- #- set -x; cd /root && /bin/bash -xe ./user-data
- - |
- set -x
- cd /root/config-drive
- if /bin/bash -xe ./user-data; then
- touch /is_cloud_init_finished
- else
- set +x
- echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
- fi
-
- # Enable root access (after reboot)
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- #- path: /etc/network/interfaces
- - path: /root/interfaces
- content: |
- auto lo
- iface lo inet loopback
-
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 60
- ServerAliveCountMax 0
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
index 319c007..81936a4 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -57,3 +60,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
index 3a65412..438ee51 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
@@ -13,9 +13,6 @@
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-queens-dvr-sl') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
@@ -48,10 +45,6 @@
gateway: +1
l2_network_device: +1
default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
default_{{ HOSTNAME_CID }}: +90
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
@@ -82,10 +75,6 @@
gateway: +1
l2_network_device: +1
default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
default_{{ HOSTNAME_CID }}: +90
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
@@ -220,7 +209,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
boot:
- hd
volumes:
@@ -727,84 +716,6 @@
interfaces: *all_interfaces
network_config: *all_network_config
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- name: {{ HOSTNAME_CID01 }}
role: salt_minion
params:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
deleted file mode 100644
index 16b73bd..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- designate_backend: bind
- ceph_cluster_network: 172.16.10.0/24
- ceph_enabled: 'True'
- ceph_hyper_converged: 'False'
- ceph_mon_node01_address: 172.16.10.66
- ceph_mon_node01_hostname: cmn01
- ceph_mon_node02_address: 172.16.10.67
- ceph_mon_node02_hostname: cmn02
- ceph_mon_node03_address: 172.16.10.68
- ceph_mon_node03_hostname: cmn03
- ceph_osd_backend: bluestore
- ceph_osd_block_db_size: '10'
- ceph_osd_bond_mode: active-backup
- ceph_osd_count: '2'
- ceph_osd_data_disks: /dev/vdb
- ceph_osd_journal_or_block_db_disks: /dev/vdc
- ceph_osd_node_count: '2'
- ceph_osd_journal_size: '10'
- ceph_osd_primary_first_nic: eth1
- ceph_osd_primary_second_nic: eth2
- ceph_osd_rack01_backend_subnet: 172.16.10
- ceph_osd_rack01_hostname: osd
- ceph_osd_rack01_single_subnet: 172.16.10
- ceph_osd_single_address_ranges: 172.16.10.94-172.16.10.95
- ceph_osd_deploy_address_ranges: 172.16.11.94-172.16.11.95
- ceph_osd_backend_address_ranges: 172.16.10.94-172.16.10.95
- ceph_public_network: 172.16.10.0/24
- ceph_rgw_address: 172.16.10.75
- ceph_rgw_hostname: rgw
- ceph_rgw_node01_address: 172.16.10.76
- ceph_rgw_node01_hostname: rgw01
- ceph_rgw_node02_address: 172.16.10.77
- ceph_rgw_node02_hostname: rgw02
- ceph_rgw_node03_address: 172.16.10.78
- ceph_rgw_node03_hostname: rgw03
- ceph_version: luminous
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-mitaka-dvr-ceph.local
- cluster_name: cookied-mcp-mitaka-dvr-ceph
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- gnocchi_aggregation_storage: ceph
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: mitaka
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
\ No newline at end of file
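Note on the salt_api_password / salt_api_password_hash pair above: the hash is a standard SHA-512 crypt(3) string (the $6$ prefix). A minimal sketch of generating a compatible hash, assuming Python's stdlib crypt module (Unix-only, deprecated since Python 3.11):

import crypt

# salt_api_password taken from the context above; mksalt() picks a random salt,
# so the output differs from the stored hash but verifies the same password.
password = "H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi"
print(crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512)))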
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
deleted file mode 100644
index 546cc34..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
deleted file mode 100644
index 318a992..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
-
-{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install designate backend
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg01
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
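Every entry in the deleted openstack.yaml above follows one step schema: description, cmd, node_name, retry {count, delay}, skip_fail. A minimal sketch of how a runner could interpret those fields (run_cmd is a hypothetical transport; the real tcp-qa executor is more involved):

import time

def run_step(step, run_cmd):
    # Try the command up to retry.count times, sleeping retry.delay between tries.
    last_exc = None
    for _ in range(step["retry"]["count"]):
        try:
            run_cmd(step["node_name"], step["cmd"])
            return
        except Exception as exc:
            last_exc = exc
            time.sleep(step["retry"]["delay"])
    if not step["skip_fail"]:  # steps marked skip_fail: true swallow failures
        raise last_exc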
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
deleted file mode 100644
index ef50b6d..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr-ceph/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
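The OVERRIDES loop in the deleted salt.yaml splits each 'key: value' line and feeds it to reclass.cluster_meta_set. A minimal Python sketch of the same parsing (the override lines here are illustrative):

overrides = "override_example: true\nanother_param: 42"  # example OVERRIDES value
for param in overrides.splitlines():
    key, value = param.replace(" ", "").split(":", 1)  # same logic as the Jinja above
    print(f"salt-call reclass.cluster_meta_set name='{key}' value='{value}'")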
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
deleted file mode 100644
index cb93ac9..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-# Install MongoDB for alerta
-- description: Install MongoDB
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
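The 'Check the VIP' step above resolves the StackLight VIP from pillar, then greps the interfaces of the mon nodes for it. A minimal sketch of the same check in Python, assuming salt-call and salt are available where it runs:

import subprocess

vip = subprocess.check_output(
    ["salt-call", "--out=newline_values_only", "pillar.get",
     "_param:stacklight_monitor_address"], text=True).strip()
out = subprocess.check_output(["salt", "-C", "mon*", "cmd.run", "ip a"], text=True)
assert vip in out, f"VIP {vip} not assigned on any mon node"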
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
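The {hostname} fields in the deleted meta-data template look like Python str.format placeholders filled per node when the config drive is generated (an assumption from the brace syntax). A minimal rendering sketch:

template = "instance-id: iid-local1\nhostname: {hostname}\nlocal-hostname: {hostname}\n"
print(template.format(hostname="cfg01"))  # one rendered meta-data document per node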
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
deleted file mode 100644
index 248d63e..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
+++ /dev/null
@@ -1,771 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-dvr-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.') %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.') %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.') %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.') %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.') %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.') %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.') %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.') %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.') %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.') %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.') %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd001.') %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd002.') %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.') %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
-      # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-   - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
\ No newline at end of file
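In the address_pools of the deleted underlay.yaml, values such as '+100' appear to be offsets from the start of the subnet carved out of the pool, with negative offsets counted from the end (as in the dhcp ranges); this is an assumption about the fuel-devops convention. A minimal sketch, assuming a 10.60.0.0/24 was allocated from private-pool01:

import ipaddress

subnet = ipaddress.ip_network("10.60.0.0/24")  # assumed /24 carved from 10.60.0.0/16:24

def resolve(offset: str) -> ipaddress.IPv4Address:
    n = int(offset)  # "+100" -> 100, "-10" -> -10
    base = subnet.network_address if n >= 0 else subnet.broadcast_address
    return base + n

print(resolve("+100"))  # 10.60.0.100, e.g. default cfg01 on private-pool01
print(resolve("-10"))   # 10.60.0.245, the upper end of the dhcp range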
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
deleted file mode 100644
index 58281a4..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-mitaka-dvr.local
- cluster_name: cookied-mcp-mitaka-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: mitaka
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
deleted file mode 100644
index 931efcb..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_pool_manager_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
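[editor's note] The environment file above only maps reclass storage names, roles and interfaces; whether it rendered correctly is visible from cfg01 once the minions are up. A hedged spot-check (exact minion IDs depend on the .local domain above):

    #!/usr/bin/env bash
    # Spot-check that the node/role model rendered into live minions.
    set -euo pipefail

    salt --hard-crash '*' test.ping          # every node from the model answers
    salt 'dns*' pillar.items | head -n 40    # designate/dns roles rendered into pillar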
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
deleted file mode 100644
index 8954160..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
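[editor's note] MACRO_CHECK_VIP expands to the same kind of keepalived check spelled out for the mon nodes in sl.yaml below: read the VIP from pillar, then confirm a node actually holds it. A standalone sketch (the pillar key is illustrative; the real macro lives in shared-core.yaml):

    #!/usr/bin/env bash
    # Sketch of the VIP sanity-check idea behind MACRO_CHECK_VIP.
    set -euo pipefail

    VIP=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address)
    echo "cluster VIP: ${VIP}"
    salt -C 'I@keepalived:cluster' cmd.run "ip a | grep -w ${VIP}"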
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
deleted file mode 100644
index b335251..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply formulas for compute node (as in the docs)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg01
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
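[editor's note] Stripped of the salt cmd.run wrapping, the network bootstrap above reduces to a short neutron/nova CLI sequence. A condensed sketch (run on ctl01 with keystonercv3 sourced; the two prefixes are placeholders for IPV4_NET_EXTERNAL_PREFIX and IPV4_NET_TENANT_PREFIX):

    #!/usr/bin/env bash
    # Condensed from the deleted steps above: external + tenant networks,
    # a router between them, and a wide-open default security group.
    set -e
    . /root/keystonercv3

    EXT=10.90.0   # placeholder: IPV4_NET_EXTERNAL_PREFIX
    TEN=10.80.0   # placeholder: IPV4_NET_TENANT_PREFIX

    neutron net-create net04_ext --router:external True \
        --provider:physical_network physnet1 --provider:network_type flat
    neutron subnet-create net04_ext ${EXT}.0/24 --name net04_ext__subnet \
        --disable-dhcp --allocation-pool start=${EXT}.150,end=${EXT}.180 \
        --gateway ${EXT}.1
    neutron net-create net04
    neutron subnet-create net04 ${TEN}.0/24 --name net04__subnet \
        --allocation-pool start=${TEN}.120,end=${TEN}.240
    neutron router-create net04_router01
    neutron router-gateway-set net04_router01 net04_ext
    neutron router-interface-add net04_router01 net04__subnet
    nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0
    nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0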
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
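[editor's note] The keys left without a value above ('volume:extend', 'create_network:shared', ...) relax the rule to an empty policy, which oslo.policy treats as an always-true rule. openstack.yaml splices this file in by prepending the original control class and symlinking it over control.yml; condensed into plain shell (LAB_CONFIG_NAME is a placeholder):

    #!/usr/bin/env bash
    # Sketch of the override splice performed by the openstack.yaml steps above.
    set -euo pipefail

    LAB_CONFIG_NAME=cookied-mcp-mitaka-dvr   # placeholder
    BASE=/srv/salt/reclass/classes/cluster/${LAB_CONFIG_NAME}/openstack

    # Prepend the original control class so the overrides extend, not replace, it.
    printf 'classes:\n- cluster.%s.openstack.control_orig\n%s\n' \
        "${LAB_CONFIG_NAME}" "$(cat ${BASE}/overrides-policy.yml)" \
        > ${BASE}/overrides-policy.yml.new
    mv ${BASE}/overrides-policy.yml.new ${BASE}/overrides-policy.yml

    mv ${BASE}/control.yml ${BASE}/control_orig.yml
    ln -s ${BASE}/overrides-policy.yml ${BASE}/control.yml
    salt '*' saltutil.sync_all
    salt '*' saltutil.refresh_pillar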
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
deleted file mode 100644
index 240f6e3..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=mitaka/Pin: release l=xenial\/openstack\/mitaka testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
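[editor's note] The OVERRIDES loop above turns newline-separated 'key: value' pairs from the environment into per-key reclass.cluster_meta_set calls. Roughly equivalent plain shell, for reference (a sketch; like the Jinja version, it strips all spaces from each pair):

    #!/usr/bin/env bash
    # Approximate expansion of the OVERRIDES Jinja loop in salt.yaml above.
    set -euo pipefail

    OVERRIDES=${OVERRIDES:-'override_example: true'}
    OVERRIDES_FILENAME=/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr/overrides.yml

    while IFS= read -r param; do
        [ -z "${param}" ] && continue
        param=${param// /}                 # mirrors Jinja's replace(' ','')
        salt-call reclass.cluster_meta_set \
            name="${param%%:*}" value="${param#*:}" file_name="${OVERRIDES_FILENAME}"
    done <<< "${OVERRIDES}"

    salt '*' saltutil.refresh_pillar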
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
deleted file mode 100644
index f2a0907..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install MongoDB
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
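[editor's note] Once the swarm and StackLight states above converge, the stack can be smoke-checked with the same primitives the steps use (docker node ls, dockerng.ps); a sketch, with the Grafana port assumed to be the stock 3000:

    #!/usr/bin/env bash
    # Post-deploy smoke check for the StackLight swarm, run from cfg01.
    set -euo pipefail

    salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'   # members Ready
    salt -C 'I@docker:swarm' dockerng.ps                            # containers up

    SL_VIP=$(salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address)
    curl -sf "http://${SL_VIP}:3000/login" >/dev/null && echo "grafana OK"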
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
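[editor's note] The runcmd block above does all of cfg01's first-boot plumbing: DNS, default route over ens3, a 4G swap file, and the salt master/minion services. When a cfg01 fails to come up, re-checking the same items by hand narrows it down quickly (a sketch, run as root on cfg01):

    #!/usr/bin/env bash
    # Hand-run checks for the cloud-init first-boot steps defined above.
    set -euo pipefail

    swapon --show                              # the 4G /swapfile is active
    grep swapfile /etc/fstab                   # ...and persisted
    systemctl is-active salt-master salt-minion
    salt-call -l info --timeout=120 test.ping  # same self-test as in runcmd
    tail -n 50 /var/log/cloud-init-output.log  # the log all output is tee'd to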
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
deleted file mode 100644
index 81afdb5..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ /dev/null
@@ -1,575 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-mitaka-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
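[editor's note] Nearly every knob in the underlay above resolves through os_env()/!os_env with a default, so a run can be resized or repointed without touching the template. A sketch of the kind of environment a job could export first (values are placeholders):

    #!/usr/bin/env bash
    # Environment overrides honoured by the !os_env / os_env() lookups above.
    export ENV_NAME=cookied-mcp-mitaka-dvr_testing_42   # placeholder build name
    export REPOSITORY_SUITE=stable                      # instead of the 'testing' default
    export SLAVE_NODE_CPU=4
    export SLAVE_NODE_MEMORY=16384
    export IMAGE_PATH1604=/var/lib/images/xenial-server-cloudimg-amd64-disk1.img
    # In private-pool01, cfg01 then lands on <net>+100, ctl01 on <net>+101, etc.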
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
deleted file mode 100644
index ecc8054..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
+++ /dev/null
@@ -1,186 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-mitaka-ovs.local
- cluster_name: cookied-mcp-mitaka-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: mitaka
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
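[editor's note] This OVS context differs from the DVR one above mainly in StackLight layout: dedicated log01-03 and mtr01-03 nodes plus influxdb long-term storage instead of the all-in-one mon trio. Diffing the two contexts in a checkout that still has both (the DVR file name is assumed from the pattern above) makes such drift easy to audit:

    #!/usr/bin/env bash
    # Compare the two cookiecutter contexts; keep only the changed lines.
    set -euo pipefail
    TPL=tcp_tests/templates
    diff -u ${TPL}/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml \
            ${TPL}/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml \
        | grep '^[+-]' | grep -v '^[+-][+-]' || true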
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
deleted file mode 100644
index 931efcb..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_pool_manager_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
deleted file mode 100644
index 6a1278e..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
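Each SHARED_CORE macro above renders into one or more deploy steps of the same shape used throughout these templates; the real bodies live in shared-core.yaml, which is not part of this diff. As a rough sketch, modeled on the explicit steps elsewhere in this change (the pillar target is an assumption), MACRO_INSTALL_KEEPALIVED() expands to something like:

- description: Install keepalived (sketch only; the actual body is defined in shared-core.yaml)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false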
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
deleted file mode 100644
index 6672997..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
+++ /dev/null
@@ -1,210 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-# SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON is replaced here by the explicit neutron steps below
-
-- description: Install neutron service on primary node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server and *01*" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron service on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# WORKAROUND PROD-20976
-- description: WORKAROUND PROD-20976
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run "sed -i
- 's/#min_l3_agents_per_router = 2/min_l3_agents_per_router = 1/'
- /etc/neutron/neutron.conf"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
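The workaround above rewrites a single option in /etc/neutron/neutron.conf on every neutron server and gateway node:

# before (commented-out default)
#min_l3_agents_per_router = 2
# after the sed
min_l3_agents_per_router = 1

presumably because this lab has only one gateway node, so an HA router could never reach the default minimum of two L3 agents.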
-
-- description: Restart Neutron services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run 'systemctl restart
- neutron*'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 20}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install designate backend
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow tcp port 22 (ssh)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; openstack security group rule create --proto icmp default'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
deleted file mode 100644
index 9f3767b..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
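As a worked example of the loop above: with the default OVERRIDES value 'override_example: true', replace(' ','') and split(':', 1) yield the key 'override_example' and the value 'true', so (assuming the default HOSTNAME_CFG01 and OVERRIDES_FILENAME) a single step is rendered:

- description: Override cluster parameters
  cmd: |
    salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-ovs/overrides.yml'
  node_name: cfg01.cookied-mcp-mitaka-ovs.local
  retry: {count: 1, delay: 1}
  skip_fail: false

followed by the 'Refresh pillar' step that makes the new value visible to the minions.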
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=mitaka/Pin: release l=xenial\/openstack\/mitaka testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
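The PROD-21071 step retargets the apt pin in /etc/apt/preferences.d/mirantis_openstack from the plain 'mitaka' release label to the 'xenial/openstack/mitaka testing' label used by the testing repository suite. The file itself is not part of this diff, but the affected stanza would look roughly like this (the Package and Pin-Priority fields are assumptions):

Package: *
Pin: release l=xenial/openstack/mitaka testing
Pin-Priority: 1100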
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/sl.yaml
deleted file mode 100644
index 010324c..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/sl.yaml
+++ /dev/null
@@ -1,258 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm state to ensure proper token population on slaves
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Create MongoDB cluster if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
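This step (like the Prometheus LTS, fluentd and heka steps below) uses a presence-check idiom: test.ping against a pillar-based compound target, relying on salt exiting non-zero when no minion matches, so the state is applied only where the service is actually modeled. A generic sketch of the idiom (the '<service>' placeholders are illustrative):

- description: Apply a state only where its pillar exists (generic sketch)
  cmd: |
    PRESENT=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@<service>:server' test.ping 1>/dev/null 2>&1 && echo true`;
    if [[ "$PRESENT" == "true" ]]; then
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@<service>:server' state.sls <service>
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true

The MongoDB steps earlier achieve the same guard with match.pillar instead.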
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
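The {hostname} placeholders are substituted per node when the config drive is built, so for ctl01 (to pick one node as an illustration, assuming the default domain) the rendered meta-data reads:

instance-id: iid-local1
hostname: ctl01.cookied-mcp-mitaka-ovs.local
local-hostname: ctl01.cookied-mcp-mitaka-ovs.local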
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
deleted file mode 100644
index 4fee5c5..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
+++ /dev/null
@@ -1,572 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-mitaka-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
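A note on the relative-address notation used in these pools: a net value such as '10.70.0.0/16:24' asks fuel-devops to allocate a free /24 inside 10.70.0.0/16, and the +N/-N entries are offsets from the start of the allocated subnet (negative values counting back from its end). Assuming admin-pool01 resolves to 10.70.0.0/24:

  gateway: +1                        -> 10.70.0.1
  default_{{ HOSTNAME_CFG01 }}: +90  -> 10.70.0.90
  dhcp: [+90, -10]                   -> 10.70.0.90 .. 10.70.0.245

(the exact endpoint of the -10 offset depends on fuel-devops conventions and is an assumption here).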
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
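The node volumes below reference these images via backing_store, which in the libvirt driver typically means a per-node qcow2 overlay is created on top of the shared base image, keeping the source images pristine. Under that assumption the driver effectively runs the equivalent of (paths and names are illustrative only):

# hypothetical per-node overlay creation
qemu-img create -f qcow2 -b /pool/mcp_ubuntu_1604_image.qcow2 ctl01-system.qcow2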
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
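The &interfaces and &network_config anchors defined on ctl01 are plain YAML anchors: ctl02, ctl03, the mon nodes and prx01 reuse the two-NIC layout via *interfaces / *network_config aliases, while the compute, gateway and dns nodes later define their own four-NIC &all_interfaces / &all_network_config variants. In miniature:

two_nic: &two_nic
  - ens3
  - ens4
node_b:
  nics: *two_nic  # the alias expands to the same list defined above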
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
deleted file mode 100644
index 9cb3979..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-newton-dvr.local
- cluster_name: cookied-mcp-newton-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: newton
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
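One note on the credentials in the context above: salt_api_password_hash is a standard SHA-512 crypt string (the $6$ prefix names the scheme, followed by the salt and digest). A hash of this form can be regenerated from the plaintext with, for example (assuming the mkpasswd tool from the Debian 'whois' package is available):

mkpasswd --method=sha-512 'H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi'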
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
deleted file mode 100644
index 6d958a6..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_pool_manager_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
deleted file mode 100644
index edb5059..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
deleted file mode 100644
index df28c5a..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
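-
-# For reference, a sketch (assuming LAB_CONFIG_NAME=cookied-mcp-newton-dvr) of
-# the rewritten overrides-policy.yml after the echo above prepends the classes
-# header to the uploaded parameters:
-#
-#   classes:
-#   - cluster.cookied-mcp-newton-dvr.openstack.control_orig
-#   parameters:
-#     nova:
-#       controller:
-#         policy:
-#           context_is_admin: 'role:admin or role:administrator'
-#           ...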
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
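-
-# Illustrative only, not part of the original template: a follow-up step in
-# the same format could verify the registration, e.g.:
-#
-#   - description: Verify the cirros image is registered
-#     cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#       '. /root/keystonercv3; glance image-list'
-#     node_name: {{ HOSTNAME_CFG01 }}
-#     retry: {count: 1, delay: 30}
-#     skip_fail: true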
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
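-
-# Illustrative only, not part of the original template: the router wiring can
-# be double-checked with the same Newton-era client, e.g.:
-#
-#   - description: Check router interfaces
-#     cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#       '. /root/keystonercv3; neutron router-port-list net04_router01'
-#     node_name: {{ HOSTNAME_CFG01 }}
-#     retry: {count: 1, delay: 30}
-#     skip_fail: true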
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg node
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-newton-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
deleted file mode 100644
index 52ec2f4..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-cookied-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
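-
-# With the default OVERRIDES ('override_example: true') and default hostnames,
-# the loop above renders to a single step, roughly:
-#
-#   - description: Override cluster parameters
-#     cmd: |
-#       salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-newton-dvr/overrides.yml'
-#     node_name: cfg01.cookied-mcp-newton-dvr.local
-#     retry: {count: 1, delay: 1}
-#     skip_fail: false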
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=newton/Pin: release l=xenial\/openstack\/newton testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
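
For reference, the sed above leaves the pin stanza in /etc/apt/preferences.d/mirantis_openstack pointing at the testing pocket, roughly as follows; only the Pin line is rewritten by the workaround, the surrounding lines are assumptions:

    Package: *
    Pin: release l=xenial/openstack/newton testing
    Pin-Priority: 1100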
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/sl.yaml
deleted file mode 100644
index 807e362..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on master to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
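-
-# The match.pillar guard above makes this step a no-op when no minion carries
-# the 'prometheus:exporters' pillar, rather than failing the whole run on an
-# empty target list.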
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List docker containers (docker ps)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
deleted file mode 100644
index 7d6147d..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
+++ /dev/null
@@ -1,575 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-newton-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-newton-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-newton-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-newton-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
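-
-# The offsets above are resolved by fuel-devops relative to the subnet carved
-# out of each <net>/16:24 pool definition. Illustrative resolution, assuming
-# private-pool01 allocates 10.60.0.0/24:
-#   gateway: +1             -> 10.60.0.1
-#   default_...cfg01: +100  -> 10.60.0.100
-#   dhcp: [+90, -10]        -> from 10.60.0.90 up to ten addresses before the
-#                              end of the subnet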
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
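-
-# Each node 'system' volume below points at one of these images via
-# 'backing_store', i.e. it is created as a qcow2 overlay on top of the base
-# image (an assumption about fuel-devops behaviour), roughly:
-#   qemu-img create -f qcow2 -b cloudimage1604.qcow2 <node>_system.qcow2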
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
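-
-# '&all_interfaces' and '&all_network_config' above are plain YAML anchors;
-# cmp02, gtw01, dns01 and dns02 below reuse them via '*all_interfaces' /
-# '*all_network_config' aliases, so the four-NIC layout is declared once.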
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
deleted file mode 100644
index 8049430..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-newton-ovs.local
- cluster_name: cookied-mcp-newton-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: U1hx5V31VJfFFBu8fCsk9ebDN2TwuBABTIcptYQ8tmFSlhSxHIkKnJnDsnckgKnH
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: newton
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
- salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
deleted file mode 100644
index 4970aec..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
deleted file mode 100644
index 4b79fcb..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
deleted file mode 100644
index c10aa28..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
+++ /dev/null
@@ -1,210 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-# {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-- description: Install neutron service on primary node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server and *01*" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron service on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# WORKAROUND PROD-20976
-- description: WORKAROUND PROD-20976
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run "sed -i
- 's/#min_l3_agents_per_router = 2/min_l3_agents_per_router = 1/'
- /etc/neutron/neutron.conf"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart Neutron services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run 'systemctl restart
- neutron*'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 20}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
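Every step in these scenario files follows the same schema: cmd runs on node_name, is retried up to retry.count times with retry.delay seconds between attempts, and skip_fail decides whether a persistent failure aborts the scenario. A minimal illustrative step (the echo command is a placeholder, not taken from the deleted file):

  - description: Placeholder step illustrating the schema
    cmd: echo "step body goes here"
    node_name: {{ HOSTNAME_CFG01 }}
    retry: {count: 3, delay: 20}  # up to 3 attempts, 20 seconds apart
    skip_fail: true               # continue the scenario even if all attempts fail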
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
deleted file mode 100644
index 89b705e..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
-
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=newton/Pin: release l=xenial\/openstack\/newton testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
\ No newline at end of file
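Worked example of the OVERRIDES loop in the deleted salt.yaml above: with the defaults (OVERRIDES='override_example: true'), each line is split on the first ':' into a key and a value, and the rendered step executes on {{ HOSTNAME_CFG01 }}:

  salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-newton-ovs/overrides.yml'

The subsequent "Refresh pillar" step then makes the overridden keys visible to all minions.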
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/sl.yaml
deleted file mode 100644
index 8ce7ea8..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
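The "Check influxdb" step in the deleted sl.yaml uses a presence-check pattern for optional services: probe the compound target with test.ping and apply the state only if at least one minion matched. A generalized sketch of the same shell pattern (the 'foo' pillar key and state name are placeholders):

  FOO_PRESENT=`salt -C 'I@foo:server' test.ping 1>/dev/null 2>&1 && echo true`
  if [[ "$FOO_PRESENT" == "true" ]]; then
    salt -C 'I@foo:server' state.sls foo
  fi

Combined with skip_fail: true, clusters without the optional service pass through the step without failing the run.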
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
deleted file mode 100644
index 883c30f..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
+++ /dev/null
@@ -1,512 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-newton-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-newton-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-newton-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-newton-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
-       # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # for storing an image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
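In the address_pools section of the deleted underlay.yaml, ip_reserved and ip_ranges entries are offsets into the subnet that fuel-devops allocates at run time: positive values count forward from the network address, negative values back from the end of the subnet. An illustrative resolution, assuming devops carves 10.60.0.0/24 out of the default PRIVATE_ADDRESS_POOL01 ('10.60.0.0/16:24'); the concrete /24 is chosen at run time:

  gateway: +1                         -> 10.60.0.1
  default_{{ HOSTNAME_CFG01 }}: +100  -> 10.60.0.100
  dhcp: [+90, -10]                    -> from 10.60.0.90 up to ten addresses before the end of the subnet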
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml
deleted file mode 100644
index a3508a6..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Refresh grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
deleted file mode 100644
index 43483ae..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ /dev/null
@@ -1,338 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'smoke') %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'ctl*' state.sls powerdns
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround for PROD-13167
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
- 'apt-get install python-pymysql -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Create physical volumes on a second disk
- cmd: salt 'ctl*' cmd.run 'pvcreate -y /dev/vdb'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: "Temporary WR set enabled_backends = lvm for cinder"
- cmd: salt 'ctl*' cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt 'gtw01*' cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
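The crudini workaround near the end of the deleted ocata openstack.yaml is equivalent to writing this fragment into /etc/cinder/cinder.conf on the ctl nodes:

  [DEFAULT]
  enabled_backends = lvm

Together with the preceding pvcreate/vgcreate steps, it points the manually installed cinder-volume service at the cinder-volumes LVM group created on /dev/vdb.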
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
deleted file mode 100644
index f64b373..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ /dev/null
@@ -1,326 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OSS: Operational Support System Tools
-
-# Glusterfs
-#-----------
-
-- description: Prepare glusterfs service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:server:enabled:True' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:server:enabled:True' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:client:enabled:True' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server:enabled:True' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Elasticsearch (system service)
-#-------------------------------
-- description: Setup Elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Setup Elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Setup Docker Swarm
-#-------------------
-
-- description: "Workaround: create /var/lib/jenkins to get Jenkins slaves working"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run 'mkdir -p /var/lib/jenkins'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Prepare Docker host
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host:enabled:True' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Docker Swarm master
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Collect grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls salt.minion.grains &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' mine.flush &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules &&
- sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
-
-- description: Install Docker Swarm on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Show Docker Swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Keepalived
-#-----------
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the CICD VIP
- cmd: |
- CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
- echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Configure OSS services
-#-----------------------
-
-- description: Setup devops portal
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@devops_portal:config:enabled' state.sls devops_portal.config
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup Rundeck server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:server' state.sls rundeck.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Deploy Docker services
-#-----------------------
-
-# Original comment from pipeline: XXX: for some weird unknown reason, refresh_pillar is required to execute here
-
-- description: "Workaround from the pipeline: XXX: for some weird unknown reason, refresh_pillar is required to execute here"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:publisher' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround from the pipeline: We need /etc/aptly-publisher.yaml to be present before services are deployed. [dd: there were issues when /etc/aptly-publisher.yaml becomes a directory, so this step should be considered]"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:publisher' state.sls aptly.publisher
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install Docker client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: "Workaround from the pipeline: sync all salt objects"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-# Aptly
-#------
-
-#### Steps are commented due to PROD-17598
-
-#- description: "Wait for Aptly to come up in container..."
-# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
-# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
-# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8084/api/version && break; sleep 2; done'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 15}
-# skip_fail: false
-
-#- description: "Setup Aptly"
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 10}
-# skip_fail: false
-
-# OpenLDAP
-#---------
-
-- description: "Waiting for OpenLDAP to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf ldap://${CICD_CONTROL_ADDRESS} && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: "Setup OpenLDAP"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' state.sls openldap &&
- sleep 20
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Gerrit
-#-------
-
-- description: "Waiting for Gerrit to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8080/config/server/version && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: "Setup Gerrit"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' state.sls gerrit
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Jenkins
-#--------
-
-- description: "Waiting for Jenkins to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:jenkins' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- export JENKINS_CLIENT_USER=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_user);
- export JENKINS_CLIENT_PASSWORD=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_password);
- while true; do
- curl -f -u ${JENKINS_CLIENT_USER}:${JENKINS_CLIENT_PASSWORD} http://${CICD_CONTROL_ADDRESS}:8081/api/json?pretty=true && break;
- sleep 2;
- done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: "Setup Jenkins"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' state.sls jenkins
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-# Postgres && Pushkin
-#--------------------
-
-- description: "Waiting for postgresql database to come up in container..."
-# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
-# 'while true; do if docker service logs postgresql_db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
- cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
- 'while true; do if docker service logs postgresql_postgresql-db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: ("Create PostgreSQL databases, it fails at first run because of known deadlock:\n"
- "1. State postgresql.client cannot insert values into 'pushkin' database because it is created empty,\n"
- "2. Container with Pushkin cannot start and fill the database scheme until state postgresql.client created users.")
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1 &&
- timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8887/apps && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 100}
- skip_fail: false
-
-# Rundeck
-#--------
-
-- description: Waiting for Rundeck to come up in container...
- cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:rundeck' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:4440 && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Setup Rundeck
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' state.sls rundeck.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Elasticsearch (in container; disabled until https://mirantis.jira.com/browse/PROD-15297 is fixed)
-#--------------
-#- description: 'Waiting for Elasticsearch to come up in container...'
-# cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
-# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
-# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 10}
-# skip_fail: false
-#
-#- description: Setup Elasticsearch
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 10}
-# skip_fail: false
-
-
-# Generate docs
-#--------------
-
-- description: Install sphinx (may fail depending on the model)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Generate docs (may fail depending on the model)
- cmd: salt-run state.orchestrate sphinx.orch.generate_doc
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Run salt minion to create cert files for nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Final checks
-#-------------
-
-- description: Check for system services in failed state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
deleted file mode 100644
index 91c0506..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ /dev/null
@@ -1,288 +0,0 @@
-default_context:
- mcp_version: testing
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxgROyM+RnJiDhS/qfXayxDbGmMqIGWsWPnc1RdMcJ9xlBM4a
- bj4iNB7wmj19oMRBXKvrvMsbnhOJ1Z1tWH1jwm3rZ7ziJlDUo1Ms/wAPXV67+ayu
- LCSp6JGTzaxo/4YTfzUvvnCJvPKuIf+BjxE6/Rzdzrp6b4FYuvOfkxN/pK4HfrrR
- wJjyQCCeXGrDcq3vKvBaZ/19MN5JtrrCRx4L42UFy1kAkNCCRir+YsK+tiDM3Tfo
- r95PNXdMyeKzMEc858D9XxK+UyNFjGrO2hZm6fmRjgWMuNnaGnVotmT1z1pB91d3
- 5q7n60d3Q7KRn6p+xStrwB7rB9+Jsi3L6q+VEQIDAQABAoIBAExCJnExdvtexO/K
- 9zxgNRJZofv/q5BWbFecIxkD50j2mLTUBtvD8/osnC5aVgJO8nkVAJFRiw5Cqgdp
- PE4i5ANhv5HQ7SsiX/GSO7bst/4WWMAbn2wCpqiZP9mqdzlI0kNgIUXvIyxwLV2M
- f8GwOg52Snmt2S8OGjTNU/wZO0QYzzi44tf2/q0QWy0EV4g2oLq66T/kKpx5FmZQ
- 0cD9GiESfmzWiq2Aivy4if7VmW4fCxTIvmUypSQf+M4J7ZR6QYUbkr19wNEiYAUq
- k9aitJNIVW0johbZwexTTF1YiIVuvSwOI/lHGz1e7iVu/hZxx35JtkzLzF9Dd01q
- M0IMXz0CgYEA95aOjqJTp2KQT++Q4uPl/K1FLNquqZ02SyUNVglkuVn6THHsTC8Q
- MfO+l39bh1QGTK/bh4dyXub2jEYfTSn1K8YMOYp57tgHTJ0Y8AZbtYaEP0g3BeO5
- Myd1/YUY+vM6h58wyoqhDLwRql5u5GM8HAibK32d+Fnrf3VSM0i4jT8CgYEAzL6Y
- c8Fu4ezRiKR1x7jSgbePADRZa7xvLKenuMMYmtg+AixEp5nmm9/vBtmrhE+RQNXw
- mQvt8EId/XGcJhv83Y+QeYg3AhsdGMIYmlGhFGJ3FtcA72wt3FTGOa2KMtmI6khL
- WqYohvESfLtCumW0XPRRUVNKF73UKjMa8VnsOa8CgYBto/CRXXUqJM2/eFlzAHUy
- hhCiIl1Co2oNsOTM+u/t3NiozbJUsmq7lDMMp8uCjEUV5LKUu/h76k+4Ir1t0GzP
- 664yNQ52JJhm5xLKCCbIpj8ePv6Ozx+OdaUclbpQNzHuKSLULrvPBeHUzmjRHtjZ
- mT4N7lzsQ/WzxeKW71c6xQKBgDGrj1qNs7O1ewO2OiiQqujzOgrnqEXdue7QYX0O
- P3rZOPnX+XPbfzmTcu5rghOgJfHftPW8EiY2NAZXOHV6Vrb9bCQ/qnClWUK3W7ac
- VQKX/KIa2Mw8p0eLfWditWMuqOuFTFqacryB4WVHHKIRqFbgopWjKhdmYwE10rR4
- hzlbAoGBAMpZ+D08Us5wrsbVlYfOobuHgq2ENPvQnZqJfTobAPGtrMk/M7M4Ga1U
- +zeO8VA0Tj5jK2qI+MIB2hZmgjp49FbejKFAD+q3srkyqwkGerNXkWOiDGmvYhKR
- UbC4GcycVQsIZK4bw0K7Pl40/u9artsAFmWOoUunyO4QH8J8EDXJ
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
- cluster_domain: cicd-sl2.local
- cluster_name: integration-dop-sl2
- deployment_type: physical
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- control_network_netmask: 255.255.255.0
- control_vlan: '10'
- deploy_network_gateway: ''
- deploy_network_netmask: 255.255.255.0
- dns_server01: 172.18.176.6
- dns_server02: 172.18.176.6
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_deploy_nic: eth0
- maas_deploy_address: 10.167.4.91
- maas_hostname: mas01
- infra_kvm01_control_address: ${_param:cicd_control_node01_address}
- infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: ${_param:cicd_control_node01_hostname}
- infra_kvm02_control_address: ${_param:cicd_control_node02_address}
- infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: ${_param:cicd_control_node02_hostname}
- infra_kvm03_control_address: ${_param:cicd_control_node03_address}
- infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: ${_param:cicd_control_node03_hostname}
- infra_kvm_vip_address: ${_param:cicd_control_address}
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: cicd-sl2.local # Must be plain text because cookiecutter-templates splits it by dots
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openstack_compute_count: '100'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_control_address: 10.167.4.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: ${_param:openstack_control_address}
- openstack_database_hostname: ${_param:openstack_control_hostname}
- openstack_database_node01_address: ${_param:openstack_control_node01_address}
- openstack_database_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_database_node02_address: ${_param:openstack_control_node02_address}
- openstack_database_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_database_node03_address: ${_param:openstack_control_node03_address}
- openstack_database_node03_hostname: ${_param:openstack_control_node03_hostname}
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: ${_param:openstack_control_address}
- openstack_message_queue_hostname: ${_param:openstack_control_hostname}
- openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
- openstack_message_queue_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
- openstack_message_queue_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
- openstack_message_queue_node03_hostname: ${_param:openstack_control_node03_hostname}
- openstack_network_engine: ovs
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_neutron_qos: 'False'
- openstack_ovs_encapsulation_type: vlan
- openstack_ovs_encapsulation_vlan_range: 2418:2420
- openstack_proxy_address: 10.167.4.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.82
- openstack_proxy_node02_hostname: prx02
- openstack_benchmark_node01_hostname: bmk01
- openstack_benchmark_node01_address: 10.167.4.85
- openstack_version: ocata
- public_host: ${_param:openstack_proxy_address}
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- stacklight_enabled: 'True'
- stacklight_version: '2'
- fluentd_enabled: 'True'
- stacklight_monitor_address: 10.167.4.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.4.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.4.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.4.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_log_address: ${_param:stacklight_monitor_address}
- stacklight_log_hostname: ${_param:stacklight_monitor_hostname}
- stacklight_log_node01_address: ${_param:stacklight_monitor_node01_address}
- stacklight_log_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
- stacklight_log_node02_address: ${_param:stacklight_monitor_node02_address}
- stacklight_log_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
- stacklight_log_node03_address: ${_param:stacklight_monitor_node03_address}
- stacklight_log_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
- stacklight_telemetry_address: ${_param:stacklight_monitor_address}
- stacklight_telemetry_hostname: ${_param:stacklight_monitor_hostname}
- stacklight_telemetry_node01_address: ${_param:stacklight_monitor_node01_address}
- stacklight_telemetry_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
- stacklight_telemetry_node02_address: ${_param:stacklight_monitor_node02_address}
- stacklight_telemetry_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
- stacklight_telemetry_node03_address: ${_param:stacklight_monitor_node03_address}
- stacklight_telemetry_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
- stacklight_long_term_storage_type: influxdb
- tenant_network_gateway: ''
- tenant_network_netmask: 255.255.255.0
- tenant_vlan: '20'
- oss_enabled: 'True'
- oss_openstack_auth_url: http://${_param:openstack_control_address}:5000/v3
- oss_openstack_username: admin
- oss_openstack_password: password
- oss_openstack_project: admin
- oss_openstack_domain_id: default
- oss_openstack_cert: |-
- -----BEGIN CERTIFICATE-----
- MIIF0TCCA7mgAwIBAgIJAJgb8XpikoRNMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
- BAYTAmN6MRcwFQYDVQQDDA5TYWx0IE1hc3RlciBDQTEPMA0GA1UEBwwGUHJhZ3Vl
- MREwDwYDVQQKDAhNaXJhbnRpczAeFw0xNzEwMTkxNDU1MTJaFw0yNzEwMTcxNDU1
- MTJaMEoxCzAJBgNVBAYTAmN6MRcwFQYDVQQDDA5TYWx0IE1hc3RlciBDQTEPMA0G
- A1UEBwwGUHJhZ3VlMREwDwYDVQQKDAhNaXJhbnRpczCCAiIwDQYJKoZIhvcNAQEB
- BQADggIPADCCAgoCggIBALMl9l2+98lUSwhRdud2pUvUdEYwXM/fZb4BeHX43Qsr
- hKzId922V3nbliT2VDk4OHck9msKDrDQfYpcXhblt8n077/brvg3c1jt/g9N7OwA
- zk6HFy7Vw0tICJyl4yExYVrpt2Ge0uLf5vkl+f82H2qUXUzlsl1sJ4tz57g448L1
- 26VCRlO5hGuF1Y7w0ZHL5bDhREnsmsWS4gFqfCOR3x+5ortdAEXn1KTON213BkGa
- e13WKWuOcJSMtEvMtTT/6z1MuklGUAZim8++0poauEQwb/RjF9gQuFHNVQbyylW8
- 9/u7EHAEd94VWWGzWlOh9BG/UjNA+JWGmBVS3a3Fij1tp4MbkkiN8s4DdtA0PPDs
- SPUjKQDOdb/sQif6rfVtb292Vn2InTGsQ7+kg4yMo7aoduyBAdp3UbysmWJrsifE
- ZZQBvXCEoyDCbydAsZni+kVxYfdVAx2Y4rUw5B4WJ0C5gIJHjyCuv+WSYio72tW5
- RK0x018dGPVn5oqFRqQbwjDCE2wlzEeINGXk/xD1ytnjxy1r/tA0XadXaHp/R1hI
- 84gQbBiLnAShRnOhCnadL/a9YjMKkYlKqQMYF02xbEG5S6Nnxqv46NkpUQCPI74a
- 7kPIKnPr0+emNp+Rnduzferfb2b+BrRlif5gjHw1+HgJ7XIDmDI4mS++YJ3GO9z/
- AgMBAAGjgbkwgbYwDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAQYwHQYDVR0OBBYE
- FMFJ2EIq4p6WJFyp3I7PN6sPyzoTMHoGA1UdIwRzMHGAFMFJ2EIq4p6WJFyp3I7P
- N6sPyzoToU6kTDBKMQswCQYDVQQGEwJjejEXMBUGA1UEAwwOU2FsdCBNYXN0ZXIg
- Q0ExDzANBgNVBAcMBlByYWd1ZTERMA8GA1UECgwITWlyYW50aXOCCQCYG/F6YpKE
- TTANBgkqhkiG9w0BAQsFAAOCAgEAi4fghF/Ink777HT1qy0MiVw61Z+ZVOhXf4OW
- +VuSVDoc6NWEpekLbtCd8VzFytjrlwVNOywueey1ZMAAYQHplvr+hYkkc1q4WFky
- qn3tFdKZzcF1jX3+fAOtl73XqvB6NknAp+PcyF32kJBnlg9bBzSyvqyu5HrzTGwg
- F2aBH4J3jcb5qUkg31PJQIcCsprz40PbBP/j6XyXw9s//Wji33a43+jmhud4LB9r
- +2ln2lleoKU7Nuu0/hdcmvXQ4qz2V+01p3/Mie0H12bEStECcyCpWxYI0GatsaOz
- mWfnw9+ZZeV+yVcNpkFDF2X7tvK8peTYeyWQRagJF49Z5HGdFn4S+98ddlIhUp16
- 5S2SMEh/nshpBLfZNTV0BQZd3GUOWgpVsTT3bsX7b8bvlidmzXRpfE5tR0ZE6d02
- jGFuYRJwLA038Bk49nznQ/CtGi9qylqR2qPsL4JkJQvQE57Bdt2obKn0aIgt3YLh
- kBFxLx930x9WzETyMPqDnrnsZPkUilXLnszBXB5W+V6u4vnZAV3yZI0/3YKoAEYp
- Lyb7L+8/YEYEWYLm7qgxX2TbTle53EJx4ze//efHOBOIS1Dmyh3JlRRHZEEPv7Mt
- FooWxfsRp7jUhWin99LTlbbp6KdmVz1K9LmrzVPgpz+ZNGXCDM5xklEnrFJy8gX5
- ptYlHCU=
- -----END CERTIFICATE-----
- oss_runbook_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxgROyM+RnJiDhS/qfXayxDbGmMqIGWsWPnc1RdMcJ9xlBM4a
- bj4iNB7wmj19oMRBXKvrvMsbnhOJ1Z1tWH1jwm3rZ7ziJlDUo1Ms/wAPXV67+ayu
- LCSp6JGTzaxo/4YTfzUvvnCJvPKuIf+BjxE6/Rzdzrp6b4FYuvOfkxN/pK4HfrrR
- wJjyQCCeXGrDcq3vKvBaZ/19MN5JtrrCRx4L42UFy1kAkNCCRir+YsK+tiDM3Tfo
- r95PNXdMyeKzMEc858D9XxK+UyNFjGrO2hZm6fmRjgWMuNnaGnVotmT1z1pB91d3
- 5q7n60d3Q7KRn6p+xStrwB7rB9+Jsi3L6q+VEQIDAQABAoIBAExCJnExdvtexO/K
- 9zxgNRJZofv/q5BWbFecIxkD50j2mLTUBtvD8/osnC5aVgJO8nkVAJFRiw5Cqgdp
- PE4i5ANhv5HQ7SsiX/GSO7bst/4WWMAbn2wCpqiZP9mqdzlI0kNgIUXvIyxwLV2M
- f8GwOg52Snmt2S8OGjTNU/wZO0QYzzi44tf2/q0QWy0EV4g2oLq66T/kKpx5FmZQ
- 0cD9GiESfmzWiq2Aivy4if7VmW4fCxTIvmUypSQf+M4J7ZR6QYUbkr19wNEiYAUq
- k9aitJNIVW0johbZwexTTF1YiIVuvSwOI/lHGz1e7iVu/hZxx35JtkzLzF9Dd01q
- M0IMXz0CgYEA95aOjqJTp2KQT++Q4uPl/K1FLNquqZ02SyUNVglkuVn6THHsTC8Q
- MfO+l39bh1QGTK/bh4dyXub2jEYfTSn1K8YMOYp57tgHTJ0Y8AZbtYaEP0g3BeO5
- Myd1/YUY+vM6h58wyoqhDLwRql5u5GM8HAibK32d+Fnrf3VSM0i4jT8CgYEAzL6Y
- c8Fu4ezRiKR1x7jSgbePADRZa7xvLKenuMMYmtg+AixEp5nmm9/vBtmrhE+RQNXw
- mQvt8EId/XGcJhv83Y+QeYg3AhsdGMIYmlGhFGJ3FtcA72wt3FTGOa2KMtmI6khL
- WqYohvESfLtCumW0XPRRUVNKF73UKjMa8VnsOa8CgYBto/CRXXUqJM2/eFlzAHUy
- hhCiIl1Co2oNsOTM+u/t3NiozbJUsmq7lDMMp8uCjEUV5LKUu/h76k+4Ir1t0GzP
- 664yNQ52JJhm5xLKCCbIpj8ePv6Ozx+OdaUclbpQNzHuKSLULrvPBeHUzmjRHtjZ
- mT4N7lzsQ/WzxeKW71c6xQKBgDGrj1qNs7O1ewO2OiiQqujzOgrnqEXdue7QYX0O
- P3rZOPnX+XPbfzmTcu5rghOgJfHftPW8EiY2NAZXOHV6Vrb9bCQ/qnClWUK3W7ac
- VQKX/KIa2Mw8p0eLfWditWMuqOuFTFqacryB4WVHHKIRqFbgopWjKhdmYwE10rR4
- hzlbAoGBAMpZ+D08Us5wrsbVlYfOobuHgq2ENPvQnZqJfTobAPGtrMk/M7M4Ga1U
- +zeO8VA0Tj5jK2qI+MIB2hZmgjp49FbejKFAD+q3srkyqwkGerNXkWOiDGmvYhKR
- UbC4GcycVQsIZK4bw0K7Pl40/u9artsAFmWOoUunyO4QH8J8EDXJ
- -----END RSA PRIVATE KEY-----
- oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
-
- # Experimental notification parameters
- oss_pushkin_smtp_host: '127.0.0.1'
- oss_pushkin_smtp_port: '25'
- oss_pushkin_email_sender_password: 'integration-password'
- oss_webhook_from: 'integration-ci@mirantis.com'
- oss_webhook_recipients: 'ddmitriev@mirantis.com'
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
-
- oss_cis_enabled: 'True'
- oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
- oss_cis_jobs_repository_branch: master
- oss_security_audit_enabled: 'True'
- oss_security_audit_os_ssl_verify: 'True'
- oss_security_audit_os_cacert_path: '/srv/volumes/rundeck/storage/content/keys/cis/openstack/cert.pem'
- oss_cleanup_service_enabled: 'True'
-
- # SFDC configuration, to be overridden from the test with actual values
- sfdc_sandbox_enabled: True
- sfdc_auth_url: ''
- sfdc_username: ''
- sfdc_password: ''
- sfdc_consumer_key: ''
- sfdc_consumer_secret: ''
- sfdc_organization_id: ''
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
deleted file mode 100644
index 5b1e465..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
+++ /dev/null
@@ -1,204 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid01.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - infra_kvm
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid02.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - infra_kvm
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid03.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - infra_kvm
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_database
- - features_designate
- - features_designate_keystone
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_database
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_database
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
deleted file mode 100644
index f5b4f73..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','virtual-mcp-ocata-dop-sl2') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-cicd_oss.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Temporary workaround for PROD-15890 to downgrade packages
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' cmd.run "apt-get update"
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' cmd.run "apt-get install -y --allow-downgrades vlan";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
deleted file mode 100644
index 7bc48a4..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ /dev/null
@@ -1,241 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 3e70fd8..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
deleted file mode 100644
index 77b2573..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ /dev/null
@@ -1,665 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dop-sl2') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+90, -10]
-
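
For orientation, the +N/-N entries are offsets inside the subnet that fuel-devops
carves out of the pool. A rough worked example, assuming the tool allocates
10.60.1.0/24 from the 10.60.0.0/16:24 pool above:

    # gateway: +1               -> 10.60.1.1
    # default_cfg01: +15        -> 10.60.1.15
    # dhcp: [+90, -10]          -> 10.60.1.90 up to about 10.60.1.246
    #                              (negative offsets count back from the end of the subnet)
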
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +6
- default_{{ HOSTNAME_GTW02 }}: +7
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
- storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
- use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- private_br:
- vlan_ifaces:
- - 10
-
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
- parent_iface:
- l2_net_dev: private_br
- tag: 10
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: {{ os_env('IMAGE_PATH1604') }} # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-     - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes image.
-       # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
-       # or
-       # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
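
The backing_store references below mean that node volumes are created as thin qcow2
overlays on top of these group volumes rather than as full copies. Roughly what the
libvirt driver does for a system volume (a hedged sketch; paths are illustrative):

    qemu-img create -f qcow2 -b /var/lib/libvirt/images/cloudimage1604.qcow2 \
        cfg01-system.qcow2 150G   # newer qemu-img also requires -F qcow2
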
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: {{ os_env('CFG_NODE_CPU', 2) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('PRX_NODE_CPU', 1) }}
- memory: {{ os_env('PRX_NODE_MEMORY', 2048) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
-              capacity: {{ os_env('PRX_NODE_VOLUME_SIZE', 150) }} # was MON_NODE_VOLUME_SIZE, an apparent copy-paste slip
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CMP_NODE_CPU', 2) }}
- memory: {{ os_env('CMP_NODE_MEMORY', 3072) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CMP_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- - label: ens5
- l2_network_device: private_br
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - private
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CMP_NODE_CPU', 2) }}
- memory: {{ os_env('CMP_NODE_MEMORY', 3072) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CMP_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('GTW_NODE_CPU', 4) }}
- memory: {{ os_env('GTW_NODE_MEMORY', 4096) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('GTW_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                        # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces: *all_interfaces
- network_config: *all_network_config
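
Once rendered, an underlay like this is consumed by fuel-devops. A quick way to
inspect the resulting environment from the virtualization host (a hedged sketch
using the standard fuel-devops CLI):

    dos.py list                    # environments known to fuel-devops
    virsh -c qemu:///system list   # the VMs defined above, once the env is started
    dos.py erase <env_name>        # tear an environment down when finished
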
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml
deleted file mode 100644
index af81722..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml
+++ /dev/null
@@ -1,137 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Refresh grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Set up glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run salt minion to create cert files for nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
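
Every step above follows the same schema: cmd runs on node_name, is retried per
retry, and skip_fail decides whether a persistent failure aborts the run. A hedged
shell sketch of those semantics, using the VIP check as the payload:

    ok=0
    for attempt in 1 2 3; do                 # retry: {count: 3, ...}
        if salt -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}"; then
            ok=1; break
        fi
        sleep 10                             # retry: {..., delay: 10}
    done
    [ "$ok" = "1" ] || exit 1                # skip_fail: false makes the failure fatal
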
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
deleted file mode 100644
index a4b52a5..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ /dev/null
@@ -1,380 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'smoke') %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'ctl*' state.sls powerdns
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temp workaround for PROD-13167
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
- 'apt-get install python-pymysql -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume via salt-call (PROD-13167)
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround to set enabled_backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround to set enabled_backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround to set enabled_backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
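
The cinder LVM workaround above spreads one procedure across many steps; condensed
into shell it amounts to the following on each ctl node (a sketch; /dev/vdb is the
spare 50G cinder volume defined in the underlay):

    echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb   # one primary partition spanning the disk
    pvcreate /dev/vdb1
    vgcreate cinder-volumes /dev/vdb1
    apt-get install -y cinder-volume crudini
    crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
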
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
deleted file mode 100644
index fd84b59..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ /dev/null
@@ -1,166 +0,0 @@
-default_context:
- mcp_version: testing
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: <<WILL_BE_GENERATED>>
- cicd_public_key: <<WILL_BE_GENERATED>>
- cluster_domain: deploy-name.local
- cluster_name: deployment_name
- deployment_type: physical
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- control_network_netmask: 255.255.255.0
- control_vlan: '10'
- deploy_network_gateway: ''
- deploy_network_netmask: 255.255.255.0
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_deploy_nic: eth0
- maas_deploy_address: 10.167.4.91
- maas_hostname: mas01
- upstream_proxy_enabled: True
- upstream_proxy_address: 10.167.5.1
- upstream_proxy_port: 8080
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.241
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.242
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.243
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- opencontrail_enabled: 'False'
- openldap_enabled: 'False'
- openstack_compute_count: '100'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_control_address: 10.167.4.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.4.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.82
- openstack_proxy_node02_hostname: prx02
- openstack_benchmark_node01_hostname: bmk01
- openstack_benchmark_node01_address: 10.167.4.85
- openstack_version: ocata
- public_host: ${_param:openstack_proxy_address}
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- stacklight_enabled: 'True'
- fluentd_enabled: 'True'
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- stacklight_log_address: 10.167.4.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.4.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.4.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.4.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 10.167.4.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.4.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.4.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.4.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.4.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.4.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.4.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.4.88
- stacklight_telemetry_node03_hostname: mtr03
- tenant_network_gateway: ''
- tenant_network_netmask: 255.255.255.0
- tenant_vlan: '20'
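
This file is a cookiecutter default_context, so the cluster model can in principle
be rendered from it directly. A hedged sketch (the template path and exact
invocation depend on the mcp cookiecutter-templates layout):

    cookiecutter --no-input \
        --config-file salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml \
        /path/to/cookiecutter-templates/cluster_product/openstack
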
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
deleted file mode 100644
index 2141209..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
+++ /dev/null
@@ -1,354 +0,0 @@
-nodes:
- cfg01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - features_designate
- - features_designate_keystone
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
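
After the environment model above is generated on cfg01, the role-to-node mapping
can be spot-checked against reclass and the live minions (a hedged sketch; the
pillar path assumes the salt-formula-linux layout):

    reclass -b /srv/salt/reclass --nodeinfo ctl01.mcp-ocata-dvr-vxlan.local | head -40
    salt 'gtw01*' pillar.get linux:network:interface --out=yaml
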
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
deleted file mode 100644
index 829d515..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','virtual-devops-mcp-ocata-sl2') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
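
The shared macros above expand into ordered step lists like those in core.yaml.
The two version checks at the end boil down to querying salt itself, roughly:

    salt-call --version                  # salt version on the master (cfg01)
    salt '*' test.version --timeout=15   # salt version on every bootstrapped minion
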
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
deleted file mode 100644
index c71f82d..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ /dev/null
@@ -1,239 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Launch containers
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Install Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS relay service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
\ No newline at end of file
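
The deleted steps above all drive deployment through Salt compound matchers: '-C I@<pillar:key>' selects minions by pillar data, and 'and'/'or' combine matches. A minimal sketch of running two of the checks by hand from cfg01, using only commands that already appear in the steps:

    # list registered swarm members, as in the "docker node ls" step
    salt --hard-crash --state-output=mixed --state-verbose=False \
         -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'

    # 'and' narrows the target: swarm members that also carry the prometheus pillar
    salt -C 'I@docker:swarm and I@prometheus:server' cmd.run 'docker ps'
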
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 19ae10b..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
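
This user-data enables root login, pins DNS, and creates a 4G swapfile on first boot. A quick post-boot check that cloud-init finished and the swap step took effect — a sketch using standard cloud-init/util-linux commands, not tooling from this repository:

    cloud-init status --wait      # block until first-boot modules complete
    swapon --show                 # expect /swapfile, 4G
    grep swapfile /etc/fstab      # confirm it persists across reboots
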
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
deleted file mode 100644
index a74e3d7..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- designate_backend: bind
- cluster_domain: cookied-mcp-ocata-dvr.local
- cluster_name: cookied-mcp-ocata-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
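
The file above follows the cookiecutter user-config layout: everything under default_context supplies template variables non-interactively. A sketch of rendering a cluster model from it against a local checkout of cookiecutter-templates (the template subpath is illustrative, not part of this change):

    cookiecutter --no-input \
        --config-file _context-cookiecutter-mcp-ocata-dvr.yaml \
        ./cookiecutter-templates/cluster_product/openstack
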
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
deleted file mode 100644
index 3a11834..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_bind9_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_bind9_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
deleted file mode 100644
index fc5d4f8..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
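
Each macro above expands into salt steps like the ones deleted from sl.yaml. For example, MACRO_CHECK_VIP verifies that the keepalived VIP is actually held by a node; a hand-run equivalent from cfg01 would look like the sketch below (the _param:cluster_vip_address pillar key is an assumption here, mirroring the stacklight VIP check elsewhere in these templates):

    VIP=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address)
    echo "cluster vip: ${VIP}"
    salt -C 'I@keepalived:cluster' cmd.run "ip a | grep ${VIP}"
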
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
deleted file mode 100644
index dc9de1c..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg01
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
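
After the net04/net04_ext bootstrap above, the resulting topology can be checked with the same Ocata-era neutron client the steps use. A short verification sketch, run on ctl01 with the credentials file sourced (no names beyond those created above):

    . /root/keystonercv3
    neutron net-list                          # expect net04 and net04_ext
    neutron router-port-list net04_router01   # gateway port + net04__subnet port
    neutron subnet-show net04_ext__subnet     # allocation pool .150-.180, DHCP off
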
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
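
Keys left with empty values here (e.g. 'volume:extend') appear intended to reset those rules to the service defaults once the override class is linked in — that reading is an assumption, not stated in this change. Applying the overrides amounts to re-running the affected control states, for example:

    salt --hard-crash --state-output=mixed --state-verbose=False \
         -C 'I@nova:controller' state.sls nova.controller
    salt -C 'I@glance:server' state.sls glance.server
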
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
deleted file mode 100644
index 9d3deb7..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
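
The OVERRIDES loop above expects one 'key: value' pair per line and writes each pair into the environment model via reclass.cluster_meta_set. A sketch of passing several overrides in through the environment (the values are illustrative only):

    # newline-separated pairs; each line becomes one reclass.cluster_meta_set call
    export OVERRIDES=$'salt_master_host: 10.60.0.15\nstacklight_version: 2'
    export OVERRIDES_FILENAME=/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml
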
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
deleted file mode 100644
index 405e647..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
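
Once the swarm services above are configured, a quick health pass mirrors the 'docker node ls' step earlier in this file; 'docker service ls' is standard Docker CLI rather than a command taken from this change:

    # all stacklight services should report their requested replica counts
    salt -C 'I@docker:swarm:role:master' cmd.run 'docker service ls'
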
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
deleted file mode 100644
index 4893e2c..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
+++ /dev/null
@@ -1,575 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
-    - name: cloudimage1604  # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
-                              # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
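
The `&interfaces` / `&network_config` anchors defined on ctl01 above are reused by the nodes below through `*interfaces` / `*network_config` aliases, so the two-NIC layout is written once. A short PyYAML sketch of the mechanism, with a simplified structure:

    import yaml  # PyYAML (pip install pyyaml)

    doc = """
    ctl01:
      interfaces: &interfaces
        - {label: ens3, l2_network_device: admin}
        - {label: ens4, l2_network_device: private}
    ctl02:
      interfaces: *interfaces
    """
    data = yaml.safe_load(doc)
    # The alias resolves to the same parsed structure as the anchor.
    assert data['ctl01']['interfaces'] == data['ctl02']['interfaces']
    print(data['ctl02']['interfaces'][0]['l2_network_device'])  # -> admin
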
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
deleted file mode 100644
index 2a6d8f9..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-ocata-ovs.local
- cluster_name: cookied-mcp-ocata-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: U1hx5V31VJfFFBu8fCsk9ebDN2TwuBABTIcptYQ8tmFSlhSxHIkKnJnDsnckgKnH
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
- salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
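
For reference, `salt_api_password_hash` above is a standard SHA-512 crypt (`$6$`) hash of `salt_api_password`. A sketch of how such a value can be generated; the stdlib `crypt` module is Unix-only and deprecated since Python 3.11, so treat this as illustrative:

    import crypt  # stdlib, Unix-only; deprecated since Python 3.11

    password = 'PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl'  # salt_api_password above
    print(crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512)))
    # -> $6$<salt>$<hash>; differs from the value above because the salt is random
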
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
deleted file mode 100644
index 4970aec..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
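
The generator-based `cmp<<count>>` node above is expanded by the environment generator into per-index compute hostnames; judging by the `cmp001`/`cmp002` names used in the underlay templates, the counter is zero-padded. An illustrative sketch of that expansion (the padding format is an assumption, not taken from the generator code):

    template = 'cmp<<count>>.mcp11-ovs-dpdk.local'
    compute_count = 2  # openstack_compute_count in the cookiecutter context

    nodes = [template.replace('<<count>>', '%03d' % (i + 1))
             for i in range(compute_count)]
    print(nodes)  # ['cmp001.mcp11-ovs-dpdk.local', 'cmp002.mcp11-ovs-dpdk.local']
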
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
deleted file mode 100644
index 6fc2af4..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
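
core.yaml consists almost entirely of Jinja macro calls; each `MACRO_INSTALL_*()` from shared-core.yaml expands into step dictionaries like the explicit ones in openstack.yaml below. A sketch of the rendering mechanics with jinja2; the macro body here is invented for the example, the real definitions live in `tcp_tests/templates/shared-core.yaml`:

    from jinja2 import DictLoader, Environment  # pip install jinja2

    templates = {
        'shared-core.yaml': (
            "{% macro MACRO_INSTALL_KEEPALIVED() %}\n"
            "- description: Install keepalived\n"
            "  cmd: salt -C 'I@keepalived:cluster' state.sls keepalived -b 1\n"
            "  node_name: {{ HOSTNAME_CFG01 }}\n"
            "{% endmacro %}"
        ),
        'core.yaml': (
            "{% import 'shared-core.yaml' as SHARED_CORE with context %}\n"
            "{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}"
        ),
    }
    env = Environment(loader=DictLoader(templates))
    # 'with context' lets the macro see variables passed to render().
    print(env.get_template('core.yaml').render(HOSTNAME_CFG01='cfg01.local'))
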
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
deleted file mode 100644
index 4072632..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install designate backend
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
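
Every step above follows the same contract: the command is retried `retry.count` times with `retry.delay` seconds between attempts, and a final failure only aborts the run when `skip_fail` is false. A minimal sketch of those semantics (not the actual tcp-qa executor):

    import subprocess
    import time

    def run_step(cmd, count=1, delay=5, skip_fail=False):
        """Retry `cmd` up to `count` times, `delay` seconds apart."""
        for attempt in range(1, count + 1):
            if subprocess.run(cmd, shell=True).returncode == 0:
                return True
            if attempt < count:
                time.sleep(delay)
        if skip_fail:
            return False  # tolerated failure: the run continues
        raise RuntimeError('Step failed after %d attempt(s): %s' % (count, cmd))

    run_step("echo 'Check IP on computes'", count=10, delay=30)
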
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
deleted file mode 100644
index 41827c7..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
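
The OVERRIDES loop above strips spaces from each line and splits on the first ':' to build one `reclass.cluster_meta_set` call per parameter. The same parsing in plain Python:

    overrides = 'override_example: true'   # OVERRIDES may hold several lines

    for param in overrides.splitlines():
        key, value = param.replace(' ', '').split(':', 1)
        print("salt-call reclass.cluster_meta_set "
              "name='%s' value='%s'" % (key, value))
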
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
deleted file mode 100644
index 7cc598b..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
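
Most targets above use Salt's compound matcher, e.g. `-C 'I@docker:swarm:role:master'`, which selects minions by pillar data. A simplified sketch of the `I@` semantics (not Salt's real matcher): nested keys walk the pillar, and the last path element may match a leaf value instead of a key:

    minion_pillars = {
        'mon01': {'docker': {'swarm': {'role': 'master'}}},
        'mon02': {'docker': {'swarm': {'role': 'manager'}}},
        'cfg01': {},
    }

    def pillar_match(pillar, path):
        node = pillar
        for part in path.split(':'):
            if not isinstance(node, dict) or part not in node:
                return node == part   # leaf comparison for the last element
            node = node[part]
        return True

    print([m for m, p in minion_pillars.items()
           if pillar_match(p, 'docker:swarm')])              # mon01, mon02
    print([m for m, p in minion_pillars.items()
           if pillar_match(p, 'docker:swarm:role:master')])  # mon01 only
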
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
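
The leading `| # All the data below ...` line matters: when this template is imported into underlay.yaml and placed behind the `&cloudinit_user_data_cfg01` alias, the pipe turns the whole cloud-config into a single YAML string. A short PyYAML sketch with a trimmed-down payload:

    import yaml  # PyYAML

    rendered = """
    cloudinit_user_data: | # All the data below will be stored as a string object
      #cloud-config
      ssh_pwauth: True
      disable_root: false
    """
    data = yaml.safe_load(rendered)
    print(type(data['cloudinit_user_data']))              # <class 'str'>
    print(data['cloudinit_user_data'].splitlines()[0])    # '#cloud-config'
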
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
deleted file mode 100644
index 2d31a5a..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
+++ /dev/null
@@ -1,512 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
-    - name: cloudimage1604  # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
-                              # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
index 3fbb777..f8b58f5 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
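
Review note: the two runcmd entries added above are deliberately guarded so that the resize
only happens on LVM-based images. A minimal sketch of the guard (assuming the vg0/vda3
layout these templates provision; cloud-init's growpart module, configured at the end of
this hunk, has already enlarged the /dev/vda3 partition by the time runcmd executes):

    if lvs vg0; then                  # true only when the image carries the vg0 volume group
        pvresize /dev/vda3            # grow the physical volume to the enlarged partition
        /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml
    fi

On plain (non-LVM) images `lvs vg0` fails, both steps are skipped, and the stock growroot
behaviour is left untouched.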
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
deleted file mode 100644
index 08a3c00..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
deleted file mode 100644
index 8531cc3..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
-
-{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
deleted file mode 100644
index e90b99b..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 979424f..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
deleted file mode 100644
index b36f8be..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ /dev/null
@@ -1,767 +0,0 @@
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-{% import 'cookied-mcp-pike-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+70, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+70, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+130, +230]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
deleted file mode 100644
index cc7acd1..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ /dev/null
@@ -1,207 +0,0 @@
-nodes:
- cfg01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- osd<<count>>.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node01
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node02
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node03
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node01
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node02
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node03
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
index df13ee9..ebfa366 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -17,6 +17,7 @@
- description: "Temp fix"
cmd: |
set -e;
+ apt-get install virtualenv -y;
apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
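
Review note: the extra apt-get line looks redundant next to python-virtualenv, but it pins
down the virtualenv entry point that the very next commands rely on; presumably on some
Xenial package sets python-virtualenv alone did not pull it in. The pattern being
protected, condensed from the same "Temp fix" step:

    apt-get install virtualenv -y;                                  # make sure the CLI exists
    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
    . /root/venv-reclass-tools/bin/activate;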
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
index c67a1ac..9cf67fa 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -17,6 +17,7 @@
- description: "Temp fix"
cmd: |
set -e;
+ apt-get install virtualenv -y;
apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index 64031ea..6bd2435 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -227,6 +227,10 @@
openstack_share_hostname: share
openstack_share_node01_hostname: share01
openstack_octavia_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
octavia_hm_bind_ip: 192.168.1.12
octavia_lb_mgmt_cidr: 192.168.1.0/24
octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
index 636187b..e9cd9a1 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -1,8 +1,8 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
index 979424f..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -47,3 +50,26 @@
content: |
auto ens3
iface ens3 inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
index edfadef..d786848 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
@@ -17,6 +17,7 @@
- description: "Temp fix"
cmd: |
set -e;
+ apt-get install virtualenv -y;
apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
index eaf8a1f..5b0ce58 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
@@ -17,6 +17,7 @@
- description: "Temp fix"
cmd: |
set -e;
+ apt-get install virtualenv -y;
apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
index 3fbb777..fd1527a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -48,3 +51,25 @@
auto ens3
iface ens3 inet dhcp
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
index ecd3224..a830c3f 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
@@ -1,20 +1,16 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-bm-dpdk-pipeline' %}
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-dpdk-pipeline') %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml', 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + REPOSITORY_SUITE + " main") %}
-{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") #}
+
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index 9a830b9..e6c7313 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -2,16 +2,14 @@
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-dvr-vxlan' %}
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
# Path to the context files used to render Cluster and Environment models
{% set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{% set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
+{% set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml', 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml'] %}
{% set CONTROL_VLAN = os_env('CONTROL_VLAN', '2404') %}
{% set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
+
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index 9970edd..c091213 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail') %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail-dpdk.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index b8bb9af..aa74df1 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -1,20 +1,16 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-ovs-dpdk' %}
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-ovs-dpdk') %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml', 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + REPOSITORY_SUITE + " main") %}
-{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") #}
+
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
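With the REPOSITORY_SUITE and SALT_REPOSITORY overrides removed here as well, the
defaults from shared-salt.yaml apply unless overridden through the environment. The
os_env helper used throughout these templates reads an environment variable and falls
back to the given default, so a suite can still be pinned per job; a minimal sketch:

    {# sketch: os_env('NAME', 'default') returns $NAME when set, else 'default' #}
    {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
    {# exporting REPOSITORY_SUITE=testing in the job environment overrides the default #}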
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
new file mode 100644
index 0000000..52098cf
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
@@ -0,0 +1,53 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-k8s-contrail40-maas' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-k8s-contrail40-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml','salt-context-cookiecutter-k8s-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
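The reclass-tools add-key step creates the dotted key path in the target YAML file, so
after it runs the MAAS power credentials exist as reclass parameters. Expected shape of
the touched part of maas_machines.yml (a sketch derived from the key paths above;
surrounding content omitted):

    parameters:
      _param:
        power_user: mcp-qa        # from IPMI_USER (default 'mcp-qa')
        power_password: password  # from IPMI_PASS (default 'password')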
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-ocata-contrail-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-ocata-contrail-maas.yaml
new file mode 100644
index 0000000..9bc9ff9
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-ocata-contrail-maas.yaml
@@ -0,0 +1,68 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-ocata-contrail-maas' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-ocata-contrail-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary WR to remove the empty cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround (fix or debug) - reduce Ceph pg_num/pgp_num
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
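The IPMI handling is a two-step flow: sed first rewrites the ==IPMI_USER== and
==IPMI_PASS== placeholders left by model generation into reclass interpolations, and
reclass-tools then defines the parameters those interpolations point at. A hypothetical
machine entry before and after (the power_parameters layout is assumed):

    # before, as generated:
    power_parameters:
      power_user: ==IPMI_USER==
      power_pass: ==IPMI_PASS==
    # after both steps:
    power_parameters:
      power_user: ${_param:power_user}
      power_pass: ${_param:power_password}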
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail32-maas-2018.8.0.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail32-maas-2018.8.0.yaml
new file mode 100644
index 0000000..127b860
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail32-maas-2018.8.0.yaml
@@ -0,0 +1,108 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-os-contrail32-maas-2018.8.0' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-os-contrail32-maas-2018.8.0') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary WR to remove the empty cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ # For 2018.11.0+ :
+ # sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ # sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ # For 2018.11.0+ :
+ # reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ # reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround of bug PROD-22286 for VCP image path (MCP 2018.8.0)"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround (fix or debug) - reduce Ceph pg_num/pgp_num
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: "Workaround for correct repositories for salt during maas bootstrap"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters.maas.cluster.saltstack_repo_key '${linux:system:repo:mcp_saltstack:key}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ reclass-tools add-key parameters.maas.cluster.saltstack_repo_trusty 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/trusty/ trusty main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ reclass-tools add-key parameters.maas.cluster.saltstack_repo_xenial 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/xenial/ xenial main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround of hardcoded apt_mk_version in docker repo"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key 'classes' 'system.linux.system.repo.mcp.apt_mirantis.docker' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml --merge;
+ reclass-tools add-key parameters._param.apt_mk_version {{ SHARED.REPOSITORY_SUITE }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
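Two version-specific details in this template: on MCP 2018.8.0 the MAAS power
credentials live in infra/maas.yml, while from 2018.11.0 onward they move to
infra/maas_machines.yml (hence the commented alternatives), and the SaltStack
repositories used during bootstrap are injected under parameters.maas.cluster. The
repository workaround should leave maas.yml with keys along these lines (derived from
the add-key paths above; formatting approximate):

    parameters:
      maas:
        cluster:
          saltstack_repo_key: ${linux:system:repo:mcp_saltstack:key}
          saltstack_repo_trusty: deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/trusty/ trusty main
          saltstack_repo_xenial: deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/xenial/ xenial main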
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas-2018.8.0.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas-2018.8.0.yaml
new file mode 100644
index 0000000..9ede30d
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas-2018.8.0.yaml
@@ -0,0 +1,108 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-os-contrail40-maas-2018.8.0' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-os-contrail40-maas-2018.8.0') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary WR to remove the empty cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ # For 2018.11.0+ :
+ # sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ # sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ # For 2018.11.0+ :
+ # reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ # reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround of bug PROD-22286 for VCP image path (MCP 2018.8.0)"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround (fix or debug) - reduce Ceph pg_num/pgp_num
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: "Workaround for correct repositories for salt during maas bootstrap"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters.maas.cluster.saltstack_repo_key '${linux:system:repo:mcp_saltstack:key}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ reclass-tools add-key parameters.maas.cluster.saltstack_repo_trusty 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/trusty/ trusty main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ reclass-tools add-key parameters.maas.cluster.saltstack_repo_xenial 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/xenial/ xenial main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround of hardcoded apt_mk_version in docker repo"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key 'classes' 'system.linux.system.repo.mcp.apt_mirantis.docker' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml --merge;
+ reclass-tools add-key parameters._param.apt_mk_version {{ SHARED.REPOSITORY_SUITE }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
new file mode 100644
index 0000000..17ad597
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
@@ -0,0 +1,68 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-os-contrail40-maas' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-os-contrail40-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary WR to remove the empty cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround (fix or debug) - reduce Ceph pg_num/pgp_num
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
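The recurring pg_num/pgp_num workaround shrinks the Ceph placement-group count from 128
to 4: on a lab with only a few OSDs, 128 PGs per pool would exceed the recommended
PG-per-OSD ratio and leave the cluster in HEALTH_WARN. A hypothetical pool entry in
ceph/setup.yml after the substitution (pool name assumed):

    pool:
      volumes:
        pg_num: 4
        pgp_num: 4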
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
new file mode 100644
index 0000000..5c65691
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
@@ -0,0 +1,68 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-queens-contrail-maas' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-queens-contrail-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary WR to remove the empty cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround (fix or debug) - reduce Ceph pg_num/pgp_num
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
index 7cf52a7..69b7cdb 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-sl.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-sl.yaml', 'cookiecutter-context-k8s-sl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
index 130b3b3..8e8537c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s.yaml', 'cookiecutter-context-k8s.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
index 65f3c2b..51e7b5f 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-genie.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-genie.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-genie.yaml', 'cookiecutter-context-k8s-genie.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
new file mode 100644
index 0000000..7ce4d69
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
@@ -0,0 +1,74 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+{% set LAB_CONFIG_NAME = 'cookied-cicd-ovs-maas' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-ovs-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','salt-context-cookiecutter-openstack_ovs.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2404') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+ salt '*' saltutil.refresh_pillar;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround (fix or debug) - reduce Ceph pg_num/pgp_num
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Temporary workaround for removing virtual gtw nodes
+ cmd: |
+ sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
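The final step strips the OVS size and placement classes from infra/kvm.yml so that no
virtual gateway (gtw) VMs are defined on the KVM hosts, presumably because the gateway
role is served by physical nodes in this MAAS-managed lab. Assumed effect on the
classes list (sketch; the remaining include is invented for illustration):

    classes:
      # removed by the sed above:
      #- system.salt.control.sizes.ovs.compact
      #- system.salt.control.placement.ovs.compact
      - system.salt.control.sizes.openstack.compact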
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
index e31a230..fe171a9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-ovs-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-pike-ovs-dpdk.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
similarity index 95%
rename from tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
rename to tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
index d4377b7..25c1f9a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
@@ -1,16 +1,17 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ceph' %}
+{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dvr-ceph' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml', 'cookiecutter-context-dvr-ceph.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
index 55d6a8b..b4e0202 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-dvr-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml', 'cookiecutter-context-pike-dvr-sl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
index c9961c2..0fd346d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-ovs-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-pike-ovs-sl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
index 0c7d928..1bda65e 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-queens-dvr-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml', 'cookiecutter-context-queens-dvr-sl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -21,10 +21,10 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
# salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -32,6 +32,9 @@
# salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
@@ -44,12 +47,4 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: "Temporary workaround: remove cinder-volume from CTL nodes"
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
deleted file mode 100644
index 4cdda3b..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-dvr-ceph' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
-{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + REPOSITORY_SUITE + " main") %}
-{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
-{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
-{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
-{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
-
- # Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Mitaka release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary workaround !! Fix or debug
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
deleted file mode 100644
index a54ce3d..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-dvr' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
-{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + REPOSITORY_SUITE + " main") %}
-{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
-{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
-{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
-{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround of missing reclass.system for dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Mitaka release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
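
The three deleted steps above share one workaround pattern that recurs in every salt_cookied-* template removed by this change: sed drops the class includes that the generated model adds for split roles, salt-call reclass.cluster_meta_set overrides cluster parameters in place, and reclass-tools del-key prunes the node definitions those includes would have produced. A minimal sketch in the same step schema, using a hypothetical cluster name "example-lab" (illustrative, not part of this change):

- description: "Sketch: trim one class include and its generated node (hypothetical example-lab)"
  cmd: |
    set -e;
    # drop the include that creates a dedicated stacklight_log cluster
    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/example-lab/infra/config/init.yml;
    # override one cluster parameter in place
    salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/example-lab/openstack/init.yml;
    # prune the node definition the deleted include would have generated
    . /root/venv-reclass-tools/bin/activate;
    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/example-lab/infra/config/init.yml;
  node_name: cfg01.example-lab.local
  retry: {count: 1, delay: 5}
  skip_fail: false

The cinder-volume steps reuse the same sed mechanism as substitutions; escaping the dots there keeps them literal, while the escaped hyphen and space are harmless no-ops.
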
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
deleted file mode 100644
index bd28102..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
-{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-ovs' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
-{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + " " + REPOSITORY_SUITE + " main") %}
-{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
-{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
-{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Mitaka release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
deleted file mode 100644
index 948b051..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-newton-dvr' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-newton-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Newton release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
deleted file mode 100644
index ee24ff1..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-newton-ovs' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-newton-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Newton release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
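
This template differs from the DVR variants above in how the DNS VIPs are set: instead of literal addresses ({{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111/.112) it points openstack_dns_node0X_address at reclass parameter references, so the bind9 addresses follow the control nodes automatically. A sketch of the two styles as they would appear in a rendered init.yml (hypothetical values; the ${_param:...} interpolation is reclass's own):

parameters:
  _param:
    # literal pinning: breaks silently if the control nodes are renumbered
    openstack_dns_node01_address: 172.16.10.111
    # reference pinning: resolved by reclass at render time
    openstack_dns_node02_address: ${_param:openstack_control_node02_address}
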
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
deleted file mode 100644
index 3211797..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dop-sl2' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-cicd_oss.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
deleted file mode 100644
index 38a1d10..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr-vxlan' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
deleted file mode 100644
index 7adb184..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
deleted file mode 100644
index 0d0bd6b..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-ovs' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
index b59248a..17301c4 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-pike-ovs-dpdk.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
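
This one-line change, repeated for each of the pike/queens templates below, appends the cluster cookiecutter context to ENVIRONMENT_CONTEXT_NAMES, presumably so the renderer that merges the listed context files can interpolate cookiecutter values into the Environment model as well. The shape of the change, written once against the variable that is already in scope (illustrative; the actual edits repeat the literal filename):

{# before: only the environment context feeds the Environment model #}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
{# after: the cluster context rides along too #}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', CLUSTER_CONTEXT_NAME] %}

Reusing CLUSTER_CONTEXT_NAME instead of repeating the literal would also keep the two settings from drifting apart.
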
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
index 4b86b85..3ac439e 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
index c7de965..a69b421 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-dvr-ssl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index 2f19cd5..071d34e 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-dvr.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index ed3a6c9..9b34102 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-ovs.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
index 657e7c2..dcaff76 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml', 'cookiecutter-context-dvr-ceph.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
index 4e5dbc9..fa1c4ae 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
index bf6683d..53342a0 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-dvr-ssl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
index 1e50429..ab976ee 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-dvr.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
index 7e2d2de..b89f211 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
@@ -6,7 +6,7 @@
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-ovs.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/k8s-ha-calico/core.yaml b/tcp_tests/templates/k8s-ha-calico/core.yaml
index 1d0a8d2..e5ff52e 100644
--- a/tcp_tests/templates/k8s-ha-calico/core.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/core.yaml
@@ -13,14 +13,14 @@
-C 'I@docker:host' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
- skip_fail: false
+ skip_fail: true
- description: Check docker
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@docker:host' cmd.run 'docker ps'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: false
+ skip_fail: true
- description: Install keepalived on first node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay--user-data1604.yaml b/tcp_tests/templates/k8s-ha-calico/underlay--user-data1604.yaml
index 6fd3272..ec9df27 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay--user-data1604.yaml
@@ -25,6 +25,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -50,3 +53,26 @@
iface ens3 inet dhcp
auto ens4
iface ens4 inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
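
The two guarded runcmd entries and the growpart stanza added here form one first-boot flow: cloud-init's growpart enlarges /dev/vda3, pvresize then republishes the extra space to LVM, and growlvm.py redistributes it among the logical volumes per image-layout.yml; the `if lvs vg0` guard keeps non-LVM images untouched. The same flow as a standalone script (the paths and layout file come from this change; the wrapper itself is a sketch):

#!/bin/sh
# Grow LVM-backed volumes on first boot, but only when vg0 exists.
if lvs vg0; then
    pvresize /dev/vda3   # the partition was already grown by cloud-init growpart
    /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml
fi
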
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml
index 1958f21..d23efd2 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
- export TERM=linux
- export LANG=C
# Configure dhclient
@@ -73,3 +76,26 @@
iface ens4 inet dhcp
auto ens5
iface ens5 inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/core.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/core.yaml
deleted file mode 100644
index c488f78..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/core.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
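
The deleted VIP check is a compact two-phase probe: resolve the expected control address from pillar on the master, then grep for it in `ip a` output across the keepalived cluster, so the step only passes while some node actually holds the VIP (the `-b 1` batches earlier in this file apply the clustered services that VIP depends on one node at a time). The same probe as a standalone sketch (targeting and pillar key as in the deleted step; the wrapper is illustrative):

#!/bin/sh
# Fail unless a keepalived cluster member currently holds the control VIP.
VIP=$(salt-call --out=newline_values_only pillar.get _param:openstack_control_address)
echo "expected VIP: ${VIP}"
salt --hard-crash --state-output=mixed --state-verbose=False \
    -C 'I@keepalived:cluster' cmd.run "ip a | grep ${VIP}" | grep -B1 "${VIP}"
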
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
deleted file mode 100644
index 043b74a..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
+++ /dev/null
@@ -1,419 +0,0 @@
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'smoke') %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-
-# Install OpenStack control services
-
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'ctl*' state.sls powerdns
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
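The echoed string drives fdisk's interactive prompts: 'n' starts a new partition, 'p' picks primary, the empty lines accept the default partition number and first/last sectors, and 'w' writes the table (the doubled leading 'n' looks like a harmless artifact). A non-interactive equivalent, assuming sfdisk is available on the node:

    # one partition spanning the whole disk, all defaults accepted
    echo ',,' | sfdisk /dev/vdb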
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create volume group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround - set enabled_backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround - set enabled_backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround - set enabled_backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
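crudini edits INI files in place, so the three workaround steps pin cinder's LVM backend on each controller without templating the whole config. The effect is easy to verify with the matching getter (a sketch):

    crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
    crudini --get /etc/cinder/cinder.conf DEFAULT enabled_backends    # prints: lvm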
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Run tests
- cmd: |
- if [[ {{ PATTERN }} == "false" ]]; then
- salt-call cmd.run 'docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output'
- else
- salt-call cmd.run "docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output"
- fi
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
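PATTERN selects the tempest filter: 'false' runs the image's default suite, while any other value is handed to tempest through the container's CUSTOM variable. Stripped of the salt-call wrapper, the branching reduces to the following (a sketch using the image and variables from the step above):

    ARGS=(-e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3)
    if [ "${PATTERN}" != "false" ]; then
        ARGS+=(-e "CUSTOM=--pattern ${PATTERN}")    # pass the filter into the container
    fi
    docker run --rm --net=host "${ARGS[@]}" -v /root/:/home/rally \
        docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output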
-
-- description: Download xml results
- download:
- remote_path: /root
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
-
-- description: Download html results
- download:
- remote_path: /root
- remote_filename: "report_*.html"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/run_test.sh b/tcp_tests/templates/mcp-ocata-local-repo-dvr/run_test.sh
deleted file mode 100644
index c706960..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/run_test.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-export REPOSITORY_SUITE=2017.12
-export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
-export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
-
-# Offline deployment simulation, requests to the apt01 node are redirected to publicly available repositories
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/xenial ${REPOSITORY_SUITE} salt"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-export SALT_REPOSITORY="deb http://apt.mirantis.com/xenial/salt/2017.7/ ${REPOSITORY_SUITE} main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
-
-# Offline deployment simulation, requests to the apt01 node are redirected to an 'offline apt node' with mirrors of repositories
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
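Both export blocks run unconditionally, so the second one (the 'offline apt node' mirrors) silently overrides the first. A guarded form makes the choice explicit (OFFLINE_MIRRORS is a hypothetical switch, not part of the original script):

    OFFLINE_MIRRORS=${OFFLINE_MIRRORS:-true}
    if [ "${OFFLINE_MIRRORS}" = "true" ]; then
        export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
    else
        export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
    fi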
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/salt.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/salt.yaml
deleted file mode 100644
index 0d46180..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/salt.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_APT01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_PRX01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_MON01 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_MON02 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_MON03 with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
-
-#- description: 'Generate nginx cert'
-# cmd: |
-# openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-# -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.gerrit.com" \
-# -keyout ssl-nginx.key -out ssl-nginx.crt;
-# node_name: {{ HOSTNAME_APT01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Check nginx APT node is ready
- cmd: systemctl status nginx;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check dnsmasq on APT node is ready
- cmd: systemctl status dnsmasq;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MON03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MON02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MON01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: 'Workaround for local_repo_url - set to offline image repository structure'
- cmd: |
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
deleted file mode 100644
index 9962efc..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
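The step resolves the expected VIP from pillar and greps it out of 'ip a' on the mon nodes; 'grep -B1' also prints the line just above the match, which in salt's cmd.run output is typically the minion id, so a passing check shows which node currently holds the VIP. The same pattern in plain shell (a sketch):

    SL_VIP=$(salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address)
    salt -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 "${SL_VIP}"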
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influx db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
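Rather than applying a state blindly, the step probes for matching minions first: test.ping against the influxdb compound target succeeds only if such minions exist. Reduced to its core (a sketch):

    if salt -C 'I@influxdb:server' test.ping >/dev/null 2>&1; then
        salt -C 'I@influxdb:server' state.sls influxdb
    fi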
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--meta-data.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
deleted file mode 100644
index 3f4c1a2..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
- - sudo ifup ens6
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- - export TERM=linux
- - export LANG=C
-
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## Cloud repo01 node ##################
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree;
- - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key -out /root/ssl-nginx.crt;
- - cd /tmp;
- - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
- - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
- - git clone https://github.com/TatyankaLeontovich/underpillar;
- - mkdir -p /srv/pillar/;
- - mkdir -p /srv/salt;
- - cd /srv/salt;
- - ln -s /tmp/salt-formula-nginx/nginx;
- - ln -s /tmp/salt-dnsmasq/dnsmasq;
- - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
- - cp /tmp/underpillar/states/*.sls /srv/salt/;
- - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
- - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
- - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- - salt-call --local --state-output=mixed state.sls dnsmasq;
- - salt-call --local --state-output=mixed state.sls nginx;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
- auto ens6
- iface ens6 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
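The ==PLACEHOLDER== tokens in the pillars cloned above are resolved with sed before the masterless salt-call runs, so one set of pillar files can point at either public hosts or local mirrors. The substitution pattern in isolation (a sketch; the value is the default from the runcmd above):

    export HOST_APT=apt.mirantis.com
    find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/${HOST_APT}/g" {} +
    salt-call --local state.sls dnsmasq    # masterless run, no salt master needed on apt01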
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 2aad6bc..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- - sleep 160;
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 5a02d24..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
deleted file mode 100644
index 9f2ce17..0000000
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
+++ /dev/null
@@ -1,555 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'mcp-ocata-local-repo-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'mcp-ocata-local-repo-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'mcp-ocata-local-repo-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
-
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'mcp-ocata-local-repo-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'mcp-ocata-local-repo-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +122
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- ip_ranges:
- dhcp: [+90, -10]
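Offsets in these pools are relative to the subnet that fuel-devops carves from the pool: '+N' appears to address the N-th host of that subnet and '-N' to count back from its end. For a /24 taken from ADMIN_ADDRESS_POOL01 the reservations resolve roughly as below (an illustration of the assumed semantics, not tool output):

    NET=10.70.0.0              # example /24 carved from 10.70.0.0/16
    BASE=${NET%.*}             # -> 10.70.0
    echo "apt01 / gateway: ${BASE}.122"      # +122
    echo "dhcp starts at:  ${BASE}.90"       # [+90, -10]: ends 10 addresses before the subnet's end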
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_APT01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_apt01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index b48a611..4a5be47 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -76,7 +76,7 @@
-C "I@keystone:server" cmd.run ". /root/keystonercv3;
openstack image list"
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 5, delay: 15}
skip_fail: false
{%- endmacro %}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index dcbf3af..c3bc0f3 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -15,16 +15,20 @@
{% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
{% set COOKIECUTTER_TAG = os_env('COOKIECUTTER_TAG','') %}
{% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
+{% set ENVIRONMENT_TEMPLATE_COMMIT = os_env('ENVIRONMENT_TEMPLATE_COMMIT','') %}
{% set ENVIRONMENT_TEMPLATE_REF_CHANGE = os_env('ENVIRONMENT_TEMPLATE_REF_CHANGE','') %}
# Currently we support 2 salt versions that can be set via the variable below
{% set SALT_VERSION = os_env('SALT_VERSION','2017.7') %}
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
-{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
-#{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
+{# set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') #}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas"+"/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
+{# set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') #}
+{% set FORMULA_GPG = os_env('FORMULA_GPG', "http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas/xenial/archive-salt-formulas.key") %}
+{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
# Note: the repo has changed, so the new one is defined below
{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
-{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{# set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') #}
+{% set SALT_GPG = os_env('SALT_GPG', "http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas/xenial/archive-salt-formulas.key") %}
{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
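The ${DISTRIB_CODENAME} fragments are deliberately left unexpanded in the template and are substituted by the shell on the target node, so one definition serves any Ubuntu release. On a xenial node with the default suite, the new FORMULA_REPOSITORY resolves as follows (a sketch; DISTRIB_CODENAME is normally derived from /etc/lsb-release):

    REPOSITORY_SUITE=testing
    DISTRIB_CODENAME=xenial
    echo "deb [arch=amd64] http://mirror.mirantis.com/${REPOSITORY_SUITE}/salt-formulas/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"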
@@ -284,6 +288,7 @@
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {} +
{%- if IS_CONTRAIL_LAB %}
# vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
@@ -378,7 +383,13 @@
cmd: |
set -e;
set -x;
+ sudo apt-get install virtualenv -y
sudo apt-get install python-setuptools -y
+
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install -U pip
+
pip install cookiecutter
chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
@@ -401,6 +412,10 @@
popd
{%- endif %}
+ if [ -f /root/cookiecutter-templates/requirements.txt ]; then
+ pip install -r /root/cookiecutter-templates/requirements.txt
+ fi
+
mkdir -p /srv/salt/reclass/classes/cluster/
mkdir -p /srv/salt/reclass/classes/system/
mkdir -p /srv/salt/reclass/classes/service/
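
The added lines isolate the cookiecutter toolchain in a dedicated virtualenv and pull any extra dependencies from the template repository itself. Reduced to a standalone sketch (paths as in the diff, root shell assumed):

apt-get install -y virtualenv python-setuptools
[ -d /root/venv-reclass-tools ] || virtualenv /root/venv-reclass-tools
. /root/venv-reclass-tools/bin/activate
pip install -U pip
pip install cookiecutter
[ -f /root/cookiecutter-templates/requirements.txt ] && \
    pip install -r /root/cookiecutter-templates/requirements.txt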
@@ -507,21 +522,20 @@
set -e;
mkdir -p /root/environment/;
export GIT_SSL_NO_VERIFY=true; git clone https://github.com/Mirantis/environment-template /root/environment/environment_template
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: false
-{%- if ENVIRONMENT_TEMPLATE_REF_CHANGE != '' %}
-- description: Fetch changes for environment templates
- cmd: |
- set -e;
- set -x;
+ {%- if ENVIRONMENT_TEMPLATE_REF_CHANGE != '' %}
pushd /root/environment/environment_template &&
git fetch https://github.com/Mirantis/environment-template {{ ENVIRONMENT_TEMPLATE_REF_CHANGE }} &&
git checkout FETCH_HEAD &&
popd
+ {%- elif ENVIRONMENT_TEMPLATE_COMMIT != '' %}
+ pushd /root/environment/environment_template
+ git checkout {{ ENVIRONMENT_TEMPLATE_COMMIT }}
+ popd
+ {%- endif %}
+
node_name: {{ HOSTNAME_CFG01 }}
skip_fail: false
-{%- endif %}
{%- for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %}
- description: "[EXPERIMENTAL] Upload environment inventory to cfg01.{{ DOMAIN_NAME }}"
@@ -602,6 +616,7 @@
cmd: |
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {} +
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
@@ -709,6 +724,7 @@
apt-get update
# Install salt-minion
+ apt-get install eatmydata -y;
eatmydata apt-get install -y salt-minion;
# Install common packages
eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
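
The new apt-get line installs eatmydata before it is first used as a wrapper; previously the template assumed the tool was already in the base image. The ordering this enforces, as a sketch:

apt-get update
apt-get install -y eatmydata               # the wrapper must exist before anything is wrapped
eatmydata apt-get install -y salt-minion   # fsync suppressed to speed up package installs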
diff --git a/tcp_tests/templates/shared-sl-tests.yaml b/tcp_tests/templates/shared-sl-tests.yaml
index 0cd49b3..00a9bb5 100644
--- a/tcp_tests/templates/shared-sl-tests.yaml
+++ b/tcp_tests/templates/shared-sl-tests.yaml
@@ -9,7 +9,7 @@
- description: Install stacklight-pytest into virtual environment
cmd: |
set -e;
- apt-get install -y build-essential python-dev;
+ apt-get install -y build-essential python-dev virtualenv;
apt-get -y install python-virtualenv;
virtualenv --system-site-packages venv-stacklight-pytest;
. venv-stacklight-pytest/bin/activate;
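
Adding virtualenv to the build-essential line looks redundant next to python-virtualenv on the following line, but on xenial it is presumably the standalone virtualenv package that ships the /usr/bin/virtualenv script, so the addition is what guarantees the command used below exists. A quick sanity check, as a sketch:

apt-get install -y build-essential python-dev virtualenv python-virtualenv
virtualenv --system-site-packages venv-stacklight-pytest
. venv-stacklight-pytest/bin/activate
python -c "import sys; print(sys.prefix)"   # prints the venv path if activation worked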
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
deleted file mode 100644
index 8705250..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-
-# Install ceph mons
-- description: Update grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:common' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 20}
- skip_fail: false
-
-- description: Generate keyrings
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync grains on ceph mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine on ceph mons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph mon
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph mgr if defined (needed only for Luminous)
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd.custom
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update mine on ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Set up pools, keyrings and, optionally, crush
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install radosgw if configured
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Connect ceph to glance
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Connect ceph to cinder and nova
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart nova-compute
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Convert cirros image
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'qemu-img convert cirros-0.3.4-i386-disk.img cirros.raw'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name "cirros" --disk-format raw --container-format bare --visibility public --file cirros.raw'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
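
Although the whole file is removed here, the conditional steps above (ceph mgr, radosgw) illustrate a reusable guard from this template family: apply a state only when some minion actually matches the pillar key. As a standalone sketch:

if salt --hard-crash -C 'I@ceph:mgr' match.pillar 'ceph:mgr'; then
    salt --hard-crash -C 'I@ceph:mgr' state.sls ceph.mgr
fi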
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml
deleted file mode 100644
index 464cf82..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Remove apparmor
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
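
The deleted "Check the VIP" step is still a handy pattern: resolve the expected VIP from pillar on the master, then verify that one of the keepalived members actually holds the address. Reduced to a sketch:

VIP=$(salt-call --out=newline_values_only pillar.get _param:openstack_control_address)
echo "expected VIP: ${VIP}"
salt -C 'I@keepalived:cluster' cmd.run "ip a | grep ${VIP}" | grep -B1 "${VIP}"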
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
deleted file mode 100644
index 827b16a..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
+++ /dev/null
@@ -1,278 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
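
For reference, the network bootstrap this deleted file performed boils down to the following sequence (the /24 prefixes here are placeholders for the rendered IPV4_NET_EXTERNAL_PREFIX and IPV4_NET_TENANT_PREFIX values):

. /root/keystonercv3
neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat
neutron subnet-create net04_ext 10.90.0.0/24 --name net04_ext__subnet --disable-dhcp
neutron net-create net04
neutron subnet-create net04 10.80.0.0/24 --name net04__subnet
neutron router-create net04_router01
neutron router-gateway-set net04_router01 net04_ext
neutron router-interface-add net04_router01 net04__subnet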
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/run_test.sh b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/run_test.sh
deleted file mode 100755
index 612cd47..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/run_test.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-. /home/jenkins/fuel-devops30/bin/activate
-pip install -r ./tcp_tests/requirements.txt -U
-pip install psycopg2
-
-export ENV_NAME=virtual-mcp-ocata-ceph-offline
-export VENV_PATH=/home/jenkins/fuel-devops30
-export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64.qcow2
-export SHUTDOWN_ENV_ON_TEARDOWN=false
-export PYTHONIOENCODING=UTF-8
-export LAB_CONFIG_NAME=virtual-mcp-ocata-ceph-offline
-export CLUSTER_NAME=virtual-mcp-ocata-ovs-ceph-local
-export REPOSITORY_SUITE=stable
-export DISTROS_CODENAME=xenial
-export SALT_VERSION=2017.7
-
-export TEST_GROUP=test_ocata_ceph_all_ovs_install
-export RUN_TEMPEST=true
-
-# Offline deploy parameters
-export SALT_MODELS_REF_CHANGE=refs/changes/86/13886/9
-
-export BOOTSTRAP_TIMEOUT=1200
-
-export HOST_APT=10.170.0.242
-export HOST_SALTSTACK=10.170.0.242
-export HOST_ARCHIVE_UBUNTU=10.170.0.242
-export HOST_MIRROR_MCP_MIRANTIS=10.170.0.242
-export HOST_MIRROR_FUEL_INFRA=10.170.0.242
-export HOST_PPA_LAUNCHPAD=10.170.0.242
-
-export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
-export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-# export SALT_REPOSITORY="deb [arch=amd64] http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/saltstack-${SALT_VERSION}/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"  # superseded by the export below
-
-export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
-
-cd tcp_tests
-py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
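
Should this suite ever need to be resurrected, the minimal invocation distilled from the script is (venv path and test group taken from the file above):

. /home/jenkins/fuel-devops30/bin/activate
export ENV_NAME=virtual-mcp-ocata-ceph-offline
export LAB_CONFIG_NAME=virtual-mcp-ocata-ceph-offline
export TEST_GROUP=test_ocata_ceph_all_ovs_install
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}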
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/salt.yaml
deleted file mode 100644
index 540acdc..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/salt.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_APT01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_PRX01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMN01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMN02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMN03 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_OSD01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_OSD02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
-
-#- description: 'Generate nginx cert'
-# cmd: |
-# openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-# -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.gerrit.com" \
-# -keyout ssl-nginx.key -out ssl-nginx.crt;
-# node_name: {{ HOSTNAME_APT01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Check nginx APT node is ready
- cmd: systemctl status nginx;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check dnsmasq on APT node is ready
- cmd: systemctl status dnsmasq;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMN01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMN02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMN03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_OSD01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_OSD02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: 'Workaround for local_repo_url: point it at the offline image repository structure'
- cmd: |
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/aptly_server_address: .*/aptly_server_address: {{ os_env('HOST_APT') }}/g' {} +
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
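
The workaround step in this deleted file shows the generic offline trick: rewrite every cluster-model reference to the public repos so it points at the local mirror instead. As a sketch (HOST_APT is whatever the environment exports, e.g. the 10.170.0.242 value from run_test.sh above):

find /srv/salt/reclass/classes/cluster -type f \
    -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
find /srv/salt/reclass/classes/cluster -type f \
    -exec sed -i "s/aptly_server_address: .*/aptly_server_address: ${HOST_APT}/g" {} +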
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
deleted file mode 100644
index 9710531..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
# Block access to SSH while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
- - sudo ifup ens6
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- - export TERM=linux
- - export LANG=C
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## Cloud repo01 node ##################
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree ntp;
- - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key -out /root/ssl-nginx.crt;
- - cd /tmp;
- - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
- - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
- - git clone https://github.com/TatyankaLeontovich/underpillar;
- - mkdir -p /srv/pillar/;
- - mkdir -p /srv/salt;
- - cd /srv/salt;
- - ln -s /tmp/salt-formula-nginx/nginx;
- - ln -s /tmp/salt-dnsmasq/dnsmasq;
- - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
- - cp /tmp/underpillar/states/*.sls /srv/salt/;
- - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
- - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
- - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- - salt-call --local --state-output=mixed state.sls dnsmasq;
- - salt-call --local --state-output=mixed state.sls nginx;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
- auto ens6
- iface ens6 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
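
The apt01 user-data above is essentially a self-contained offline-mirror bootstrap; its DNS redirection reduces to the following (interface name and upstream resolver as in the file):

LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
{ echo "nameserver ${LOCAL_DNS_IP}"; echo "nameserver 172.18.208.44"; } > /etc/resolv.conf
echo "supersede domain-name-servers ${LOCAL_DNS_IP}, 172.18.208.44" >> /etc/dhcp/dhclient.conf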
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 800a0b1..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
-
- #- sudo route add default gw {gateway} {interface_name}
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml
deleted file mode 100644
index 5a02d24..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
# Block access to SSH while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
deleted file mode 100644
index 1fd9b17..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
+++ /dev/null
@@ -1,635 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
-
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ceph-offline') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ceph-offline_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +122
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
- group_volumes:
- name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
# http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_APT01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_apt01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 8
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
deleted file mode 100644
index 25ec2f0..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-
-# Install ceph mons
-- description: Update grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:common' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
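-# NOTE: these steps use Salt compound targeting: '-C' enables compound
-# matchers, and 'I@ceph:common' selects minions whose pillar contains the
-# 'ceph:common' key (e.g. "salt -C 'I@ceph:mon and *01*' test.ping").
-# '--hard-crash' re-raises the original exception instead of exiting
-# gracefully, so a failing call cannot be silently swallowed.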
-
-- description: Generate keyrings
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync grains on ceph mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine on ceph mons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph mon
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph mgr if defined (needed only for Luminous)
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd.custom
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
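-# NOTE: 'retry' re-runs a failed step 'count' times with 'delay' seconds
-# between attempts; 'skip_fail: true' lets the scenario continue even when
-# all attempts fail (used above, presumably because ceph.osd.custom is not
-# defined in every model).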
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update mine on ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Set up pools, keyrings and optionally crush
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install radosgw if defined
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
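-# NOTE: the 'if salt ... match.pillar' guard above makes the step a no-op
-# on models where no minion carries the 'ceph:radosgw' pillar.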
-
-- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Connect ceph to glance
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Connect ceph to cinder and nova
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart nova-compute
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Convert cirros image
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'qemu-img convert cirros-0.3.4-i386-disk.img cirros.raw'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name "cirros" --disk-format raw --container-format bare --visibility public --file cirros.raw'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml
deleted file mode 100644
index 70112b7..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
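-# NOTE: the check above resolves the keepalived VIP from pillar
-# ('_param:openstack_control_address'), then greps 'ip a' on all keepalived
-# cluster members; the final grep fails the step (triggering the retries)
-# until one node actually holds the address.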
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
deleted file mode 100644
index 4fbecca..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ /dev/null
@@ -1,278 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
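-# NOTE: the steps above assemble the standard test topology: a flat external
-# network (net04_ext) with a static allocation pool, a tenant network (net04)
-# with its subnet, and a router (net04_router01) gatewayed to net04_ext with
-# the tenant subnet attached.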
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
deleted file mode 100644
index 350be48..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
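-
-# NOTE: the MACRO_* calls above expand into the ordered deployment steps
-# defined in shared-salt.yaml: install the salt master, clone and configure
-# the reclass model with the listed formula services, bootstrap the minions,
-# and verify the installed salt versions.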
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
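-   # The commands above create a persistent 4 GB swap file; the fstab entry
-   # keeps it enabled across reboots.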
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
deleted file mode 100644
index 75fffe4..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ /dev/null
@@ -1,583 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
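-    # NOTE: values such as '+101' and '[+90, -10]' are offsets relative to
-    # the allocated subnet: '+101' means network address + 101, while '-10'
-    # counts back from the broadcast address. The ':24' suffix asks the
-    # driver to carve a free /24 out of the given /16.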
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
-    - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
-    # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
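- # NOTE: node 'system' volumes reference these group volumes through
- # 'backing_store', so every VM boots from a thin qcow2 overlay on top of
- # the shared base image rather than a full copy of it.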
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                       # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
deleted file mode 100644
index e2573e8..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ /dev/null
@@ -1,170 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
-# Install ceph mons
-- description: Update grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:common' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Generate keyrings
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync grains on ceph mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine on ceph mons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph mon
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph mgr if defined (needed only for Luminous)
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd.custom
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update mine on ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Set up pools, keyrings and optionally crush
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install radosgw
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{%- for ssh in config.underlay.ssh %}
- {%- set salt_roles = [] %}
- {%- for role in ssh['roles'] %}
- {%- if role in config.salt_deploy.salt_roles %}
- {%- set _ = salt_roles.append(role) %}
- {%- endif %}
- {%- endfor %}
-
- {%- if salt_roles %}
-- description: Restart salt-minion as a workaround for PROD-16970
- cmd: |
-    service salt-minion restart;  # in case salt-minion was already installed
- node_name: {{ ssh['node_name'] }}
- retry: {count: 1, delay: 1}
- skip_fail: false
- {%- endif %}
-{%- endfor %}
-
-- description: Connect ceph to glance
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Connect ceph to cinder and nova
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart nova-compute
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_CEPH() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
deleted file mode 100644
index 3a24e5e..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
deleted file mode 100644
index 13c3e13..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
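Each step above carries `retry: {count, delay}` and `skip_fail` knobs; "Check IP on computes", for example, retries up to 10 times with a 30-second pause and aborts the run on final failure. The shell equivalent of that control flow, as an illustrative sketch (the real runner lives in the tcp_tests Python framework; COUNT/DELAY/CMD are hypothetical names, not framework API):

```bash
# Illustrative retry loop mirroring `retry: {count: 10, delay: 30}` with
# `skip_fail: false`.
COUNT=10; DELAY=30
CMD="salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run 'ip a'"
for attempt in $(seq 1 "$COUNT"); do
    if eval "$CMD"; then
        break                   # success: stop retrying
    elif [ "$attempt" -eq "$COUNT" ]; then
        exit 1                  # skip_fail: false -> last failure aborts the run
    fi
    sleep "$DELAY"              # delay between attempts
done
```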
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
deleted file mode 100644
index 89c3882..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "backupninja" "glusterfs" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
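`os_env(NAME, default)` in these templates reads an environment variable and falls back to the second argument, so `SALT_MODELS_REPOSITORY` above defaults to the Gerrit URL unless it is exported. The same pattern in shell, for comparison (a sketch, not framework code):

```bash
# Shell analogue of the template's os_env() fallback:
SALT_MODELS_REPOSITORY="${SALT_MODELS_REPOSITORY:-https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab}"
echo "Using salt models from: ${SALT_MODELS_REPOSITORY}"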
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
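The `{hostname}` placeholders in this meta-data are plain per-node string substitutions filled in when the config-drive ISO is built, not Jinja. An illustrative one-liner showing the effect (the sed render stands in for the framework's internal substitution):

```bash
# Illustrative render of the meta-data template for one node.
hostname=cfg01.virtual-mcp-pike-dvr-ceph-rgw.local
sed "s/{hostname}/${hostname}/g" underlay--meta-data.yaml
```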
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
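The cfg01 user-data above enables root SSH, creates a 4 GB swap file, and starts salt-master and salt-minion before the final `salt-call ... test.ping` self-check. If that self-check fails, a few spot checks on the booted node usually localize the problem; the commands are standard, the sequence is just an illustrative checklist:

```bash
# Run on cfg01 after first boot.
swapon --show                         # the 4G /swapfile should be active
grep swapfile /etc/fstab              # and persisted across reboots
systemctl is-active salt-master salt-minion
salt-key -L                           # the local minion key should be listed/accepted
salt-call -l info --timeout=120 test.ping   # same self-check the user-data runs
```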
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
deleted file mode 100644
index 08c87ef..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
+++ /dev/null
@@ -1,678 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr-ceph-rgw') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr-ceph-rgw_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+70, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+70, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
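The address pools are defined as relative offsets: `net: 10.70.0.0/16:24` tells fuel-devops to carve a /24 out of the /16, and entries such as `default_cfg01: +90` or `dhcp: [+70, -10]` are host offsets from the start (negative values: from the end) of that /24. A simplified arithmetic sketch, assuming the first /24 is the one chosen (real allocation is done by fuel-devops):

```bash
# Simplified offset math for ADMIN_ADDRESS_POOL01.
net=10.70.0.0
IFS=. read -r o1 o2 o3 o4 <<< "$net"
echo "cfg01 admin IP  (+90): $o1.$o2.$o3.$((o4 + 90))"
echo "dhcp range start(+70): $o1.$o2.$o3.$((o4 + 70))"
echo "dhcp range end  (-10): $o1.$o2.$o3.$((256 - 10))"
```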
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
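The `backing_store` option makes each node's system disk a qcow2 overlay on top of the shared base image, so the ~150 GB volumes stay thin while the base image stays read-only. The manual equivalent with qemu-img, as a hedged sketch (the overlay path is a placeholder; `$IMAGE_PATH1604` is the same base image referenced above):

```bash
# Manual equivalent of `backing_store: cloudimage1604` for one node volume.
qemu-img create -f qcow2 -F qcow2 -b "$IMAGE_PATH1604" \
    /var/lib/libvirt/images/ctl01-system.qcow2 150G
qemu-img info /var/lib/libvirt/images/ctl01-system.qcow2   # shows the backing file
```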
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
index 5c35319..ccc1a07 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-core.yaml' as SHARED_CORE with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
index 94a72e3..c3ab09e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
@@ -1,8 +1,8 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
index b3818b7..d7461ed 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Install keepalived on mon nodes
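The three maas hunks above all fix the same defect: the templates imported `HOSTNAME_*` from `virtual-mcp-pike-dvr/underlay.yaml` instead of their own `virtual-mcp-pike-dvr-maas/underlay.yaml`. A quick repo-wide check for the same class of mistake, run from the repository root (a heuristic sketch: it flags underlay imports whose quoted prefix differs from the template's directory name):

```bash
# List template files that import an underlay.yaml from outside their own directory.
for f in tcp_tests/templates/*/[a-z]*.yaml; do
    dir=$(basename "$(dirname "$f")")
    grep -Hn "from '" "$f" | grep "underlay.yaml" | grep -v "'${dir}/" || true
done
```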
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
deleted file mode 100644
index a2d4be8..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
deleted file mode 100644
index 2b791b2..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ /dev/null
@@ -1,181 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-- description: Nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install DogTag server service
-- description: Install DogTag server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@dogtag:server and *01*' state.sls dogtag.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install DogTag server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@dogtag:server' state.sls dogtag.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install Barbican server service
-- description: Install Barbican server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@barbican:server and *01*' state.sls barbican.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Barbican server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@barbican:server' state.sls barbican.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Barbican client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@barbican:client' state.sls barbican.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
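Note the deployment pattern above: DogTag and Barbican are first applied to a single member (`and *01*`) so the clustered service can initialize on one node before the state runs everywhere, and designate is installed with `-b 1` so Salt batches the state one minion at a time. The same flags can be used ad hoc; the commands below simply spell out the pattern already used by these steps:

```bash
# Batch a state across matching minions one at a time (same flag as the
# designate step above).
salt -b 1 -C 'I@designate:server' state.sls designate

# The bootstrap-then-rest pattern, spelled out:
salt -C 'I@barbican:server and *01*' state.sls barbican.server   # first member
salt -C 'I@barbican:server' state.sls barbican.server            # then the rest
```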
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Install manila-api on first node
- cmd: |
- salt -C 'I@manila:api and *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-api on other nodes
- cmd: |
- salt -C 'I@manila:api and not *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-scheduler
- cmd: |
- salt -C 'I@manila:scheduler' state.sls manila.scheduler;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-share
- cmd: |
- salt -C 'I@manila:share' state.sls manila.share;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Workaround for https://mirantis.jira.com/browse/PROD-19012
- cmd: |
- salt 'ctl*' cmd.run 'systemctl restart apache2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check manila-services
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Create manila type
- cmd: |
- salt 'cfg01*' state.sls manila.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create CIFS and NFS shares and check their status
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
- sleep 5;
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
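The final step creates the CIFS and NFS shares and lists them after a fixed 5-second sleep, which can race against slow share creation. A more patient check, as a hedged sketch to run on ctl01 (the keystonercv3 path is the one the steps above source; the 'creating' status keyword is the usual manila transitional state):

```bash
# Poll until both test shares leave the 'creating' state (run on ctl01).
. /root/keystonercv3
for i in $(seq 1 12); do
    manila list
    manila list | grep -q creating || break   # stop once nothing is mid-creation
    sleep 10
done
```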
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
deleted file mode 100644
index bb4c5e4..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "barbican" "dogtag" "runtest" "artifactory" "logrotate" "auditd" "gnocchi" "manila"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
deleted file mode 100644
index b10f820..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
+++ /dev/null
@@ -1,516 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr-ssl-barbican') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02') %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01') %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02') %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01') %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
deleted file mode 100644
index 49b016a..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
deleted file mode 100644
index dcc854e..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-- description: Nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Install manila-api on first node
- cmd: |
- salt -C 'I@manila:api and *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-api on other nodes
- cmd: |
- salt -C 'I@manila:api and not *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-scheduler
- cmd: |
- salt -C 'I@manila:scheduler' state.sls manila.scheduler;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-share
- cmd: |
- salt -C 'I@manila:share' state.sls manila.share;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: WR https://mirantis.jira.com/browse/PROD-19012
- cmd: |
- salt 'ctl*' cmd.run 'systemctl restart apache2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check manila-services
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
-
-- description: Create manila type
- cmd: |
- salt 'cfg01*' state.sls manila.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create CIFS and NFS shares and check their status
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
- sleep 5;
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
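Every entry in this (now removed) openstack.yaml follows one step schema: `description`, `cmd`, `node_name`, `retry: {count, delay}`, `skip_fail`. A sketch of how a runner could interpret that schema, assuming a hypothetical `run_on(node, cmd)` helper that returns an exit code (this is not the tcp-qa runner itself):

```python
import time

def run_step(step, run_on):
    """Execute one deploy step, retrying up to retry['count'] times."""
    retry = step.get('retry', {'count': 1, 'delay': 0})
    for _ in range(retry['count']):
        if run_on(step['node_name'], step['cmd']) == 0:
            return True                      # step succeeded
        time.sleep(retry['delay'])           # wait before the next attempt
    if step.get('skip_fail', False):
        return False                         # tolerated failure
    raise RuntimeError('Step failed: ' + step['description'])
```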
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
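These override files are plain oslo.policy rule maps keyed by service; rendered out, the nova fragment above would take roughly the policy.json shape below (keys left without a value, such as 'compute:create:attach_network', presumably revert to the service defaults):

```python
import json

# Approximate policy.json fragment produced from the nova override above
nova_policy = {
    'context_is_admin': 'role:admin or role:administrator',
    'compute:create': 'rule:admin_or_owner',
}
print(json.dumps(nova_policy, indent=2))
```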
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
deleted file mode 100644
index 6729010..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "auditd" "gnocchi" "manila" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
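The `{hostname}` fields here (and `{gateway}` in the user-data files below) are Python-style format placeholders; the framework presumably fills them in per node, along these lines:

```python
# Illustrative substitution; the real per-node values come from the underlay config
meta_data = (
    'instance-id: iid-local1\n'
    'hostname: {hostname}\n'
    'local-hostname: {hostname}\n'
)
print(meta_data.format(hostname='cfg01.virtual-mcp-pike-dvr-ssl.local'))
```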
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
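As the leading `|` in these files indicates, the whole cloud-config body is parsed as a single string scalar (hence the comment about a "string object"), not as nested YAML; a quick check of that behaviour:

```python
import yaml

doc = yaml.safe_load('userdata: |\n  #cloud-config\n  ssh_pwauth: True\n')
assert isinstance(doc['userdata'], str)   # one opaque string, not a dict
print(doc['userdata'].splitlines()[0])    # '#cloud-config'
```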
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
deleted file mode 100644
index aaf67ba..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
+++ /dev/null
@@ -1,515 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr-ssl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
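The address pools in this underlay use offsets relative to the allocated subnet: `+101` is the 101st address and a dhcp range of `[+90, -10]` runs from the 90th address to the 10th-from-last. A sketch of that arithmetic with `ipaddress` (the exact fuel-devops semantics may differ slightly; '10.60.0.0/16:24' means a /24 carved out of the /16):

```python
import ipaddress

subnet = ipaddress.ip_network('10.60.0.0/24')   # one /24 from the /16 pool
hosts = list(subnet)                            # index 0 is the network address
ctl01 = hosts[101]                              # 'default_...ctl01...: +101' -> 10.60.0.101
dhcp_range = (hosts[90], hosts[-10])            # 'dhcp: [+90, -10]'
print(ctl01, dhcp_range)
```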
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
deleted file mode 100644
index 5c35319..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
deleted file mode 100644
index 05fe6ee..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Install manila-api on first node
- cmd: |
- salt -C 'I@manila:api and *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-api on other nodes
- cmd: |
- salt -C 'I@manila:api and not *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-scheduler
- cmd: |
- salt -C 'I@manila:scheduler' state.sls manila.scheduler;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-share
- cmd: |
- salt -C 'I@manila:share' state.sls manila.share;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: WR https://mirantis.jira.com/browse/PROD-19012
- cmd: |
- salt 'ctl*' cmd.run 'systemctl restart apache2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check manila-services
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
-
-- description: Create manila type
- cmd: |
- salt 'cfg01*' state.sls manila.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create CIFS and NFS shares and check their status
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
- sleep 5;
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
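The `-C 'I@manila:api and *01*'` targets used above are Salt compound matchers: a pillar match (`I@`) combined with a minion-id glob. The rough equivalent from Salt's Python API, assuming a reachable salt master:

```python
import salt.client

local = salt.client.LocalClient()
# pillar key 'manila:api' present AND minion id matching '*01*'
ret = local.cmd('I@manila:api and *01*', 'state.sls',
                arg=['manila.api'], tgt_type='compound')
print(ret)
```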
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
deleted file mode 100644
index e027b64..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "runtest" "neutron" "logrotate" "auditd" "gnocchi" "manila"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
deleted file mode 100644
index e1914b4..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
+++ /dev/null
@@ -1,516 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                     # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
deleted file mode 100644
index 887d5eb..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install ceph mons
-- description: Update grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:common' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Generate keyrings
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync grains on ceph mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine on ceph mons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install ceph mon
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' state.sls ceph.mon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Install ceph mgr if defined (needed only for Luminous)
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceph osd nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls ceph.osd.custom
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Sync grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' saltutil.sync_grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update mine on ceph osd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Set up pools, keyrings, and optionally the CRUSH map
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install radosgw if exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
- description: Install ceph client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{%- for ssh in config.underlay.ssh %}
- {%- set salt_roles = [] %}
- {%- for role in ssh['roles'] %}
- {%- if role in config.salt_deploy.salt_roles %}
- {%- set _ = salt_roles.append(role) %}
- {%- endif %}
- {%- endfor %}
-
- {%- if salt_roles %}
- description: Restart salt-minion as a workaround for PROD-16970
- cmd: |
- service salt-minion restart; # In case salt-minion was already installed
- node_name: {{ ssh['node_name'] }}
- retry: {count: 1, delay: 1}
- skip_fail: false
- {%- endif %}
-{%- endfor %}
-
-- description: Connect ceph to glance
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Connect ceph to cinder and nova
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Restart nova-compute
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
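Every entry in the deleted template files above follows one step schema consumed by the tcp_tests runner. A minimal hedged sketch of that schema — the hostname and state below are illustrative, not taken from the deleted file:

    # One deployment step as the tcp_tests runner consumes it (illustrative values)
    - description: Apply a state on nodes matched by a compound pillar target
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@ceph:mon' state.sls ceph.mon     # -C: compound matcher, I@ targets pillar data
      node_name: cfg01.example.local           # node the command runs from (hypothetical)
      retry: {count: 2, delay: 10}             # re-run up to 2 times, 10 seconds apart
      skip_fail: false                         # a persistent failure aborts the deployment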
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
deleted file mode 100644
index 3aed7e6..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
deleted file mode 100644
index f2a71cd..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
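The compute bootstrap above deliberately runs state.apply twice: the first pass is allowed to fail while dependencies settle (skip_fail: true), the second must converge cleanly. A hedged sketch of the idiom, with an illustrative node_name:

    - description: First pass, tolerate ordering failures
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
      node_name: cfg01.example.local   # hypothetical
      retry: {count: 1, delay: 5}
      skip_fail: true                  # ignore failures from not-yet-ready dependencies

    - description: Second pass, must converge cleanly
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
      node_name: cfg01.example.local
      retry: {count: 1, delay: 5}
      skip_fail: false                 # any remaining failure aborts the run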
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
deleted file mode 100644
index 684c535..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
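These salt.yaml templates are thin Jinja wrappers: parameters come from the environment via os_env(name, default), and the actual steps are delegated to macros imported from shared-salt.yaml. A minimal sketch of the pattern — the repository variable below is illustrative, the macro name is taken from the file above:

    {# Read a parameter from the environment with a fallback default #}
    {% set EXAMPLE_REPOSITORY = os_env('EXAMPLE_REPOSITORY', 'https://gerrit.mcp.mirantis.com/example') %}

    {# Delegate the actual steps to shared macros, expanded in order #}
    {% import 'shared-salt.yaml' as SHARED with context %}
    {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}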
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
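The cfg01 user-data above relies on cloud-init ordering: bootcmd runs early and on every boot (here, to enable root SSH before anything connects), while runcmd runs late and only on the first boot. A minimal sketch of that ordering, with illustrative commands:

    #cloud-config
    bootcmd:                                           # early, every boot
      - echo "bootcmd ran" >> /var/log/boot-order.log
    runcmd:                                            # late, first boot only
      - echo "runcmd ran" >> /var/log/boot-order.log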
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
deleted file mode 100644
index 1ce697b..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
+++ /dev/null
@@ -1,580 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-ovs-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-ovs-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
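Two mechanisms do most of the de-duplication in the underlay above: YAML anchors (&interfaces and &all_interfaces are defined once and reused via *interfaces / *all_interfaces on later nodes) and the !os_env tag, which fuel-devops resolves to an environment variable with a fallback default. A self-contained sketch of both with illustrative node names — note the !os_env tag needs devops' custom YAML loader:

    nodes:
      - name: node01.example.local
        params:
          vcpu: !os_env SLAVE_NODE_CPU, 2    # $SLAVE_NODE_CPU if set, else 2
          interfaces: &example_interfaces    # define the anchor once...
            - label: ens3
              l2_network_device: admin
      - name: node02.example.local
        params:
          vcpu: !os_env SLAVE_NODE_CPU, 2
          interfaces: *example_interfaces    # ...and alias it on every later node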
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
deleted file mode 100644
index af8778d..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
deleted file mode 100644
index ea91bad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# Install designate
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install Telemetry services (mdb nodes)
-- description: Install redis service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:cluster:role:master' state.sls redis &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:server' state.sls redis
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install gnocchi server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server and *01*' state.sls gnocchi.server &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server' state.sls gnocchi.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup gnocchi client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client and *01*' state.sls gnocchi.client &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client' state.sls gnocchi.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install panko server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceilometer server on first node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server and *01*' state.sls ceilometer
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceilometer server on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server' state.sls ceilometer
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install aodh server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server and *01*' state.sls aodh &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server' state.sls aodh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install OpenStack dashboard and proxy services
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Install manila-api on first node
- cmd: |
- salt -C 'I@manila:api and *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-api on other nodes
- cmd: |
- salt -C 'I@manila:api and not *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-scheduler
- cmd: |
- salt -C 'I@manila:scheduler' state.sls manila.scheduler;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-share
- cmd: |
- salt -C 'I@manila:share' state.sls manila.share;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Workaround for https://mirantis.jira.com/browse/PROD-19012
- cmd: |
- salt 'ctl*' cmd.run 'systemctl restart apache2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check manila-services
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
-
-- description: Create manila type
- cmd: |
- salt 'cfg01*' state.sls manila.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Create CIFS and NFS shares and check their status
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
- sleep 5;
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
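Two targeting idioms recur in the telemetry steps above: the 'and *01*' pattern, which bootstraps a clustered service on its first member before the rest join, and salt's -b 1 batch flag, which rolls a state through matched minions one at a time. A hedged sketch of both — node_name is illustrative:

    - description: Bootstrap a clustered service on the first member, then the rest
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@gnocchi:server and *01*' state.sls gnocchi.server &&
        salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@gnocchi:server' state.sls gnocchi.server
      node_name: cfg01.example.local
      retry: {count: 1, delay: 5}
      skip_fail: false

    - description: Roll a state through minions one at a time (batch size 1)
      cmd: salt -C 'I@designate:server' state.sls designate -b 1
      node_name: cfg01.example.local
      retry: {count: 5, delay: 10}
      skip_fail: false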
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
deleted file mode 100644
index 04a3e30..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "neutron" "logrotate" "auditd" "gnocchi" "manila"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
deleted file mode 100644
index 816f9b1..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
+++ /dev/null
@@ -1,555 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
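-
-# A note on the '&'/'*' markers used throughout the nodes list: the first node
-# defines an anchor (e.g. '&all_interfaces') and later nodes reuse it as an
-# alias ('*all_interfaces'), so every compute/gateway node gets an identical
-# NIC layout without copy-paste. A minimal sketch (the node name is
-# hypothetical):
-#
-#  - name: example01.{{ DOMAIN_NAME }}
-#    role: salt_minion
-#    params:
-#      interfaces: *all_interfaces          # reuse the 4-NIC layout above
-#      network_config: *all_network_config  # and its matching network mapping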
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt b/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt
deleted file mode 100644
index b4fa30f..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Template for deploying mitaka/newton models:
-- virtual-mcp-mitaka-dvr
-- virtual-mcp-mitaka-ovs
-- virtual-mcp-newton-dvr
-- virtual-mcp-newton-ovs
-
-Used by maintenance team.
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/core.yaml b/tcp_tests/templates/virtual-mcp-sl-os/core.yaml
deleted file mode 100644
index 41b92ab..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/core.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
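-
-# Every entry in this file follows the same step schema consumed by the test
-# runner: a shell command targeted at one node, with a retry and failure
-# policy. A minimal sketch (state and pillar names are hypothetical):
-#
-# - description: Apply an example state
-#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#     -C 'I@example:enabled' state.sls example
-#   node_name: {{ HOSTNAME_CFG01 }}     # node the command is executed on
-#   retry: {count: 2, delay: 10}        # re-run up to 2 times, 10s apart
-#   skip_fail: false                    # false aborts the run on failure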
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
deleted file mode 100644
index dc5c185..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
+++ /dev/null
@@ -1,309 +0,0 @@
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server:role:primary' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install glance on other controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server:role:secondary' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 status"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# install designate
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Re-apply (as in the doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
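-# Assuming IPV4_NET_EXTERNAL_PREFIX renders to '10.90.0' (the actual value
-# comes from shared-salt.yaml and may differ), the step above expands to:
-#
-# . /root/keystonercv3; neutron subnet-create net04_ext 10.90.0.0/24 \
-#   --name net04_ext__subnet --disable-dhcp \
-#   --allocation-pool start=10.90.0.150,end=10.90.0.180 --gateway 10.90.0.1
-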
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy on gtw
- cmd: |
- set -e;
- iptables --policy FORWARD ACCEPT;
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
deleted file mode 100644
index a1b2c92..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-sl-os/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-sl-os/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/cluster/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "docker" "kibana" "logrotate"') }}
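-
-# FORMULA_SERVICES is a whitespace-separated list of quoted salt formula names
-# installed on the master; extending the deployment is a matter of appending
-# names, e.g. (a hypothetical addition of 'designate'):
-#
-# {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "designate"') }}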
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
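-
-# With the default OVERRIDES ('override_example: true') the loop above renders
-# a single override step; a sketch of the generated command:
-#
-# salt-call reclass.cluster_meta_set name='override_example' value='true' \
-#   file_name='/srv/salt/reclass/classes/cluster/overrides.yml'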
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
- description: Workaround - run linux state to fix hosts
- cmd: salt "cfg*" state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
deleted file mode 100644
index dbbc9bc..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Rerun swarm on slaves for proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
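-# The step above guards an optional service: the state is applied only when at
-# least one minion matches the pillar target. The same pattern works for any
-# optional component (the service name below is hypothetical):
-#
-# EXAMPLE_SERVICE=`salt -C 'I@example:server' test.ping 1>/dev/null 2>&1 && echo true`;
-# if [[ "$EXAMPLE_SERVICE" == "true" ]]; then
-#   salt -C 'I@example:server' state.sls example
-# fi
-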
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Run docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 1018c28..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
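-
-# Note on templating in this file: double-brace '{{ ... }}' expressions (the
-# ssh key loop) are rendered by Jinja, while single-brace placeholders such as
-# {gateway} and {hostname} are substituted later, when fuel-devops generates
-# the cloud-init ISO. A sketch of one rendered runcmd line, assuming an admin
-# gateway of 10.70.0.1:
-#
-#   - sudo echo "nameserver 10.70.0.1" >> /etc/resolvconf/resolv.conf.d/base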
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
deleted file mode 100644
index 0782243..0000000
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
+++ /dev/null
@@ -1,512 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-sl-os/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-sl-os/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-sl-os/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-sl-os') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-sl-os_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
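-# The offsets above are relative to the subnet that devops allocates from the
-# pool: with ADMIN_ADDRESS_POOL01 left at its default '10.70.0.0/16:24', a /24
-# is carved out of the /16 and (assuming the first one, 10.70.0.0/24, is
-# picked) the entries resolve roughly as:
-#
-#   gateway: +1                       # -> 10.70.0.1
-#   default_{{ HOSTNAME_CFG01 }}: +90 # -> 10.70.0.90
-#   dhcp: [+90, -10]                  # -> ~10.70.0.90 .. 10.70.0.245
-#                                     #    (negative offsets count back from
-#                                     #     the end of the subnet)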
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
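-
-# The '!os_env VARIABLE, default' tag used for node sizing reads an
-# environment variable and falls back to the default, so a run can be resized
-# without editing the template; a sketch:
-#
-#   vcpu: !os_env SLAVE_NODE_CPU, 2         # $SLAVE_NODE_CPU if set, else 2
-#   memory: !os_env SLAVE_NODE_MEMORY, 4096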
diff --git a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
deleted file mode 100644
index da47d0b..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Template for deploying mitaka models with trusty:
-- virtual-mcp-mitaka-dvr-trusty
-- virtual-mcp-mitaka-ovs-trusty
-
-Used by maintenance team.
-
-The following env vars should be used:
-SALT_MODELS_COMMIT = 'fa85f84'
-SALT_MODELS_SYSTEM_TAG = '2018.8.0'
-REPOSITORY_SUITE = '2018.8.0'
-OVERRIDES = 'openstack_log_appender: true
-linux_system_repo_mk_openstack_version: testing
-'
-
-Also, the VCP 2018.8.0 images should be used.
\ No newline at end of file
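-
-For example, the variables above could be exported before starting the job
-(the exact wiring is environment-specific):
-
-export SALT_MODELS_COMMIT='fa85f84'
-export SALT_MODELS_SYSTEM_TAG='2018.8.0'
-export REPOSITORY_SUITE='2018.8.0'
-export OVERRIDES='openstack_log_appender: true
-linux_system_repo_mk_openstack_version: testing
-'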
diff --git a/tcp_tests/templates/virtual-mcp-trusty/core.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
deleted file mode 100644
index a433aee..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/core.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# vkhlyunev: shared steps are constantly updated due to master development, so
-# we can't use them for the old release; e.g. the new gluster steps use the
-# glusterfs:server:role:primary pillar for targeting, which does not exist in
-# the 2018.8.0 release model (and we can't update the model).
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service && sleep 20
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status && gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
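-
-# Targeting notes for the steps above: '-C' enables compound matching, where
-# 'I@key:path' matches on pillar data and '*01*' on the minion id, joined with
-# 'and'; '-b 1' batches the state run one node at a time. Both combined, as
-# used for glusterfs above:
-#
-# salt -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1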
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
deleted file mode 100644
index fff0966..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
+++ /dev/null
@@ -1,264 +0,0 @@
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# vkhlyunev: shared steps are constantly updated due to master development, so
-# we can't use them for the old release. For openstack.yaml we can use some
-# shared steps for now, but TODO: bind the deployment workflow to the 2018.8.0 state
-
-# Install OpenStack control services
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -G 'oscodename:trusty' cmd.run "service ntp stop && ntpdate pool.ntp.org && service ntp start"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install keystone service on primary node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server and *01*' state.sls keystone.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Install keystone service on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
- cmd.run "service apache2 restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
- cmd.run "service apache2 status"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- description: Mount glusterfs.client volumes (requires the 'keystone' system user to exist)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls glusterfs.client -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@keystone:server" cmd.run ". /root/keystonercv3;
- openstack service list"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-- description: Install nova service on primary node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@nova:controller and *01*" state.sls nova.controller
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nova service on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@nova:controller" state.sls nova.controller
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@keystone:server" cmd.run ". /root/keystonercv3;
- openstack compute service list"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check nova list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@keystone:server" cmd.run ". /root/keystonercv3;
- openstack server list"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Restart libvirtd on compute nodes (PROD-23034)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' service.restart libvirtd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy on gtw
- cmd: |
- set -e;
- iptables --policy FORWARD ACCEPT;
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
deleted file mode 100644
index d8f7fb7..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-trusty/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-trusty/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/cluster/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# vkhlyunev: sometimes we have to verify fixes for mitaka openstack based on
-# the ubuntu trusty OS deployment. The last known deployable configuration uses
-# the mcp-virtual-lab/salt-formulas/reclass-system parameters/commits/tags listed below:
-# SALT_MODELS_COMMIT = 'fa85f84'
-# SALT_MODELS_SYSTEM_TAG = '2018.8.0'
-# REPOSITORY_SUITE = '2018.8.0'
-# OVERRIDES = 'openstack_log_appender: true
-# linux_system_repo_mk_openstack_version: testing'
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "docker" "kibana" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-{%- endif %}
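-# Each OVERRIDES line above is parsed as 'key: value' (with spaces stripped)
-# and written into the OVERRIDES_FILENAME file via reclass.cluster_meta_set.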
-
-# vkhlyunev: the fa85f84 model commit contains sphinx on the cfg01 node,
-# which is not required for mitaka-trusty testing. Unfortunately we cannot
-# fix it in the model itself, because the models are constantly updated to
-# follow the development of the main release.
-- description: Apply sphinx workaround - delete system.sphinx class
- cmd: sed -i -e '/system.sphinx/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Apply sphinx workaround - delete nginx section
- cmd: sed -i -e '/ nginx:/,+8d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-- description: WR run linux state to fix hosts
- cmd: salt "cfg*" state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install linux-image-extra package on controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' cmd.run 'apt -y install linux-image-extra-$(uname -r)'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=mitaka/Pin: release l=trusty\/openstack\/mitaka testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# WORKAROUND PROD-22827
-- description: Set Pin-Priority up to 1200 for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin-Priority: 1100/Pin-Priority: 1200/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/sl.yaml b/tcp_tests/templates/virtual-mcp-trusty/sl.yaml
deleted file mode 100644
index c517c63..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on master to ensure proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
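-# (grep -B1 keeps the minion id line printed above the match, and makes the
-# step fail unless at least one mon node actually holds the VIP.)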
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data-cfg01.yaml
deleted file mode 100644
index a8afd05..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data1404.yaml
deleted file mode 100644
index b30ee21..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay.yaml
deleted file mode 100644
index 4a4e8ca..0000000
--- a/tcp_tests/templates/virtual-mcp-trusty/underlay.yaml
+++ /dev/null
@@ -1,446 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-trusty/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-trusty/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-trusty/underlay--user-data1404.yaml' as CLOUDINIT_USER_DATA1404 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1404 {{ CLOUDINIT_USER_DATA1404 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-trusty') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-trusty_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
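- # Offsets such as '+100' are counted from the start of the pool network;
- # negative values such as '-10' are counted from its end.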
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
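- # '!os_env VAR, default' reads the environment variable VAR and falls
- # back to the value given after the comma when VAR is unset.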
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1404 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/core.yaml b/tcp_tests/templates/virtual-mcp11-dvr/core.yaml
deleted file mode 100644
index 6653d1f..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/core.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
deleted file mode 100644
index 16ada26..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
+++ /dev/null
@@ -1,360 +0,0 @@
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-# Configure cinder-volume via salt-call (PROD-13167)
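-# The echoed string answers fdisk's interactive prompts: 'n' creates a new
-# partition, 'p' makes it primary, the blank lines accept the defaults, and
-# 'w' writes the partition table to /dev/vdb.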
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
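-# (crudini edits ini-style files in place: 'crudini --set <file> <section>
-# <key> <value>' creates or updates the key, as used in the next steps to
-# point cinder at the lvm backend.)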
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
deleted file mode 100644
index b557d3a..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.94/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.95/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.96/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 600f6fb..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block SSH access while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 48e3a15..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
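
A recurring safety pattern in these user-data files: `bootcmd` installs an iptables rule that drops inbound SSH while cloud-init prepares the node, and the final `runcmd` steps delete that rule once provisioning succeeds. A quick invariant check, sketched as a plain string scan over a trimmed stand-in fragment:

```python
# Sketch: verify that a user-data fragment which blocks SSH in bootcmd
# also re-opens it in runcmd, so a completed deploy never leaves a node
# locked out. Pure string scan; no YAML or Jinja2 parsing involved.
def ssh_gate_is_balanced(user_data: str) -> bool:
    blocked = "iptables -A INPUT -p tcp --dport 22 -j DROP" in user_data
    reopened = "iptables -D INPUT -p tcp --dport 22 -j DROP" in user_data
    return reopened or not blocked

FRAGMENT = """
bootcmd:
  - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
runcmd:
  - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
"""
assert ssh_gate_is_balanced(FRAGMENT)
```
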
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
deleted file mode 100644
index 51f1b64..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
+++ /dev/null
@@ -1,420 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +91
- default_{{ HOSTNAME_CTL02 }}: +92
- default_{{ HOSTNAME_CTL03 }}: +93
- default_{{ HOSTNAME_CMP01 }}: +95
- default_{{ HOSTNAME_CMP02 }}: +96
- default_{{ HOSTNAME_GTW01 }}: +94
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +91
- default_{{ HOSTNAME_CTL02 }}: +92
- default_{{ HOSTNAME_CTL03 }}: +93
- default_{{ HOSTNAME_CMP01 }}: +95
- default_{{ HOSTNAME_CMP02 }}: +96
- default_{{ HOSTNAME_GTW01 }}: +94
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
-   - name: cloudimage1604  # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
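
The `ip_reserved` and `ip_ranges` entries in the address pools above use offsets relative to the pool network: `+N` counts forward from the network address, `-N` backward from the broadcast address, so `dhcp: [+90, -10]` spans from the 90th address to the 10th-from-last. A sketch of that resolution with the standard `ipaddress` module; this reading of the notation is an assumption, and the real fuel-devops resolver may differ in edge cases:

```python
# Sketch of fuel-devops-style relative offsets: '+N' from the network
# address, '-N' from the broadcast address. Assumed semantics, not the
# actual fuel-devops implementation.
import ipaddress

def resolve(net: ipaddress.IPv4Network, offset: str) -> ipaddress.IPv4Address:
    n = int(offset)
    base = net.network_address if n >= 0 else net.broadcast_address
    return base + n

net = ipaddress.ip_network("10.60.0.0/24")
print(resolve(net, "+1"))    # 10.60.0.1   -> gateway / l2_network_device
print(resolve(net, "+90"))   # 10.60.0.90  -> default_cfg01
print(resolve(net, "-10"))   # 10.60.0.245 -> upper end of the dhcp range
```
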
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml
deleted file mode 100644
index 717fccc..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy service status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' cmd.run 'docker ps'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
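
Every step in these deploy files shares one schema: `description`, `cmd`, `node_name`, `retry: {count, delay}`, and `skip_fail`. The runner retries a failing command up to `count` times with `delay` seconds between attempts, and a final failure aborts the run only when `skip_fail` is false. A sketch of those semantics, with local `subprocess` execution standing in for remote execution on `node_name`:

```python
# Sketch of the retry/skip_fail semantics of the step schema above.
# Local shell execution is a stand-in for running `cmd` on `node_name`.
import subprocess
import time

def run_step(step: dict) -> None:
    retry = step.get("retry", {"count": 1, "delay": 0})
    for attempt in range(1, retry["count"] + 1):
        if subprocess.run(step["cmd"], shell=True).returncode == 0:
            return
        if attempt < retry["count"]:
            time.sleep(retry["delay"])
    if not step.get("skip_fail", False):
        raise RuntimeError("Step failed: " + step["description"])

run_step({"description": "Check docker",
          "cmd": "echo docker ps",
          "retry": {"count": 1, "delay": 5},
          "skip_fail": False})
```
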
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
deleted file mode 100644
index e9c17ec..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Install etcd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' state.sls etcd.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the etcd health
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Kubernetes and Calico
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' state.sls kubernetes.pool;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run 'calicoctl node status';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Setup NAT for Calico
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' --subset 1 state.sls etcd.server.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run the whole master state to check consistency
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Restart kubelet
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' service.restart kubelet
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Register addons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
deleted file mode 100644
index d16a126..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
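
All the parameters in these templates go through `os_env(name, default)`: read an environment variable, fall back to the given default. The helper is provided by tcp-qa's template machinery; its observable behavior amounts to the following:

```python
# Sketch of the os_env() helper the templates call from Jinja2. The real
# helper lives in tcp-qa's template machinery; this mirrors its behavior.
import os

def os_env(name: str, default: str = "") -> str:
    return os.environ.get(name, default)

repo = os_env("SALT_MODELS_REPOSITORY",
              "https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab")
print(repo)
```
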
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
deleted file mode 100644
index ba76ad8..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifdown ens4
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml
deleted file mode 100644
index 23b112f..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
deleted file mode 100644
index 2e92e1b..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
+++ /dev/null
@@ -1,270 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-k8s-calico-minimal') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-k8s-calico-minimal_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- ip_ranges:
- dhcp: [+90, -10]
-
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- ip_ranges:
- dhcp: [+90, -10]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: true
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- group_volumes:
-   - name: cloudimage1604  # This name is used for the 'backing_store' option of node volumes.
-     # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 3072
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: k8s_controller
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
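
The `&interfaces`/`*interfaces` and `&all_interfaces`/`*all_interfaces` pairs in these underlay files are ordinary YAML anchors and aliases: the first node defines the structure once and later nodes reuse it verbatim. Any YAML loader resolves them the same way, as this PyYAML sketch shows:

```python
# Sketch: YAML anchors/aliases as used by the underlay node definitions.
# The second node's alias resolves to the exact structure anchored by the
# first node, which is why only one node per group spells interfaces out.
import yaml

DOC = """
nodes:
  - name: ctl01
    interfaces: &interfaces
      - {label: ens3, l2_network_device: admin}
      - {label: ens4, l2_network_device: private}
  - name: ctl02
    interfaces: *interfaces
"""
nodes = yaml.safe_load(DOC)["nodes"]
assert nodes[0]["interfaces"] == nodes[1]["interfaces"]
print(nodes[1]["interfaces"])
```
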
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml
deleted file mode 100644
index 1be9ba1..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*'
- state.sls salt.minion.cert
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Check docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' cmd.run 'docker ps'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keepalived on first node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived on whole cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy service status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install SL core services
-#- description: Install glusterfs
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:server' state.sls glusterfs.server.service
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Setup glusterfs on primary controller
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 5}
-# skip_fail: false
-
-#- description: Check the gluster status
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Install glusterfs client
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:client' state.sls glusterfs.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog for haproxy logs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
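
The "Check mysql status" step greps for `wsrep_incoming_addresses` and `wsrep_cluster_size`, the two variables that show whether Galera has converged. What the check amounts to, with illustrative values rather than output captured from a live cluster:

```python
# Sketch of the Galera health criterion behind the mysql.status grep:
# the reported cluster size matches the expected node count, and every
# member shows up in wsrep_incoming_addresses. Values are illustrative.
def galera_healthy(status: dict, expected_nodes: int) -> bool:
    size = int(status.get("wsrep_cluster_size", 0))
    members = [m for m in
               status.get("wsrep_incoming_addresses", "").split(",") if m]
    return size == expected_nodes and len(members) == size

status = {
    "wsrep_cluster_size": "3",
    "wsrep_incoming_addresses":
        "10.60.0.101:3306,10.60.0.102:3306,10.60.0.103:3306",
}
assert galera_healthy(status, expected_nodes=3)
```
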
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
deleted file mode 100644
index 04b46a3..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Kubernetes
-- description: Install etcd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' state.sls etcd.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Check the etcd health
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Kubernetes addons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Install Kubernetes and Calico
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' state.sls kubernetes.pool
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Setup NAT for Calico
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server and *01*' state.sls etcd.server.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run the whole master state to check consistency
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Register addons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart kubelet
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' service.restart kubelet
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Renew hosts file on the whole cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Get kubeconfig
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:control and *01*' cmd.run 'cat /etc/kubernetes/admin-kube-config && echo "Salt command execution success"'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
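
The "Get kubeconfig" step appends an explicit success marker after the `cat`, since `cmd.run` returns stdout whether or not the command worked; the marker is what lets a caller tell a successfully read (possibly empty) file from a silent failure. A sketch of consuming that output:

```python
# Sketch: splitting the kubeconfig out of cmd.run output using the
# explicit success marker from the step above. Sample output is made up.
MARKER = "Salt command execution success"

def extract_kubeconfig(cmd_output: str) -> str:
    if MARKER not in cmd_output:
        raise RuntimeError("kubeconfig retrieval failed on the remote node")
    return cmd_output.split(MARKER)[0].rstrip()

sample = "apiVersion: v1\nkind: Config\n" + MARKER
print(extract_kubeconfig(sample))
```
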
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
deleted file mode 100644
index 36a0228..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% set ENABLE_COMPUTES_SELF_REGISTER = os_env('ENABLE_COMPUTES_SELF_REGISTER', '') %}
-# Different templates use this mechanism.
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
-{{ SHARED.REGISTER_COMPUTE_NODES() }}
-{%- endif %}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
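
`ENABLE_COMPUTES_SELF_REGISTER` demonstrates the toggle idiom used throughout these templates: a non-empty environment variable switches an optional block on at render time. Sketched with Jinja2 and a placeholder body in place of the real `REGISTER_COMPUTE_NODES` macro:

```python
# Sketch of the env-var toggle around optional template blocks. The
# rendered body here is a placeholder, not the real macro content.
import os
from jinja2 import Template

TMPL = Template("""\
{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
# compute self-registration steps would be rendered here
{%- endif %}
# base steps follow
""")

flag = os.environ.get("ENABLE_COMPUTES_SELF_REGISTER", "")
print(TMPL.render(ENABLE_COMPUTES_SELF_REGISTER=flag))
```
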
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
deleted file mode 100644
index 70d9a3b..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
+++ /dev/null
@@ -1,236 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-calico/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
-
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Create MongoDB cluster if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional if set in model)
-- description: Prometheus LTS (optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS relay service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 504fd80..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifdown ens4
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
deleted file mode 100644
index 6fd3272..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
deleted file mode 100644
index 1e3df5b..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ /dev/null
@@ -1,414 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
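The os_env(name, default) helper used throughout these templates reads an environment variable with a fallback default. A minimal Python equivalent, as an assumption inferred from how the helper is called here:

    import os

    def os_env(name, default=None):
        # Return the environment variable if set, otherwise the fallback default.
        return os.environ.get(name, default)

    # Mirrors the line above: REPOSITORY_SUITE defaults to 'testing'.
    print(os_env('REPOSITORY_SUITE', 'testing'))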
-{% import 'virtual-mcp11-k8s-calico/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-k8s-calico/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-k8s-calico') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-k8s-calico_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
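In these address pools, the +N and -N entries under ip_reserved and ip_ranges are offsets into the network allocated from the pool. A small Python sketch of that arithmetic, assuming positive offsets count forward from the network address and negative ones count back from the broadcast address (fuel-devops' exact boundary handling may differ):

    import ipaddress

    # One /24 carved from PRIVATE_ADDRESS_POOL01 ('10.60.0.0/16:24').
    net = ipaddress.ip_network('10.60.0.0/24')

    def offset(network, n):
        # Positive offsets count from the network address,
        # negative offsets count back from the broadcast address.
        base = network.broadcast_address if n < 0 else network.network_address
        return ipaddress.ip_address(int(base) + n)

    print(offset(net, 101))   # default ctl01 address -> 10.60.0.101
    print(offset(net, -10))   # upper bound of the dhcp range -> 10.60.0.245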
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: true
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: k8s_controller
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
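The &interfaces / *interfaces and &network_config / *network_config pairs used by the nodes above and below are ordinary YAML anchors and aliases: the block is written once on the first node and referenced by every later one. A short PyYAML check of how such aliases expand (node and field names are borrowed from this template):

    import yaml  # PyYAML

    doc = """
    ctl01:
      interfaces: &interfaces
        - {label: ens3, l2_network_device: admin}
        - {label: ens4, l2_network_device: private}
    ctl02:
      interfaces: *interfaces
    """
    data = yaml.safe_load(doc)
    # The alias expands to the same content on load.
    assert data['ctl02']['interfaces'] == data['ctl01']['interfaces']
    print(data['ctl02']['interfaces'][0]['label'])  # -> ens3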
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 1024
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml
deleted file mode 100644
index 840134d..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
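Each step in these scenario files carries cmd, node_name, retry: {count, delay} and skip_fail fields. A minimal Python sketch of how a runner might interpret them: retry the command up to count times, sleeping delay seconds between attempts, and raise on exhaustion only when skip_fail is false. This only illustrates the step schema; the real tcp-qa executor runs cmd remotely on node_name, which is omitted here.

    import subprocess
    import time

    def run_step(step):
        retry = step.get('retry', {'count': 1, 'delay': 0})
        for attempt in range(1, retry['count'] + 1):
            # The real runner executes this on step['node_name'] over SSH.
            result = subprocess.run(step['cmd'], shell=True)
            if result.returncode == 0:
                return  # step succeeded
            if attempt < retry['count']:
                time.sleep(retry['delay'])
        if not step.get('skip_fail', False):
            raise RuntimeError('step failed: %s' % step['description'])

    run_step({'description': 'example step', 'cmd': 'true',
              'retry': {'count': 2, 'delay': 5}, 'skip_fail': False})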
-- description: Install docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' cmd.run 'docker ps'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keepalived on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-#- description: Install glusterfs
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:server' state.sls glusterfs.server.service
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Setup glusterfs on primary controller
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 5}
-# skip_fail: false
-
-#- description: Check the gluster status
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# Install SL core services
-
-#- description: Install glusterfs client
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glusterfs:client' state.sls glusterfs.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf b/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf
deleted file mode 100644
index 398a257..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf
+++ /dev/null
@@ -1,110 +0,0 @@
-## Last commit: 2017-05-18 08:39:52 UTC by root
-version 12.1X46-D20.5;
-system {
- host-name vsrx1;
- root-authentication {
- encrypted-password "$1$gpbfk/Jr$lF2foqHYBd/Sp56dlmkXH1"; ## SECRET-DATA
- }
- name-server {
- 8.8.8.8;
- 8.8.4.4;
- }
- services {
- ssh;
- web-management {
- http {
- interface ge-0/0/0.0;
- }
- }
- }
- syslog {
- file messages {
- any any;
- }
- }
- license {
- autoupdate {
- url https://ae1.juniper.net/junos/key_retrieval;
- }
- }
- ntp {
- peer 46.243.48.4;
- peer 147.251.48.140;
- peer 46.243.48.88;
- }
-}
-interfaces {
- ge-0/0/0 {
- unit 0 {
- family inet {
- address 172.16.10.90/24;
- }
- }
- }
- ge-0/0/1 {
- unit 0 {
- family inet {
- address 192.168.10.90/24;
- }
- }
- }
- ge-0/0/2 {
- unit 0 {
- family inet {
- address 10.70.0.91/24;
- }
- }
- }
-}
-routing-options {
- route-distinguisher-id 172.16.10.90;
- autonomous-system 64512;
- dynamic-tunnels {
- dynamic_overlay_tunnels {
- source-address 172.16.10.90;
- gre;
- destination-networks {
- 172.16.10.0/24;
- }
- }
- }
-}
-protocols {
- mpls {
- interface all;
- }
- bgp {
- group Contrail_Controller {
- type internal;
- local-address 172.16.10.90;
- keep all;
- family inet-vpn {
- unicast;
- }
- allow 172.16.10.0/24;
- }
- }
-}
-security {
- forwarding-options {
- family {
- mpls {
- mode packet-based;
- }
- }
- }
-}
-routing-instances {
- public {
- instance-type vrf;
- interface ge-0/0/1.0;
- vrf-target target:64512:10000;
- vrf-table-label;
- routing-options {
- static {
- route 192.168.10.0/24 discard;
- route 0.0.0.0/0 next-hop 192.168.10.1;
- }
- }
- }
-}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf.template b/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf.template
deleted file mode 100644
index e7eed4a..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf.template
+++ /dev/null
@@ -1,110 +0,0 @@
-## Last commit: 2017-05-18 08:39:52 UTC by root
-version 12.1X46-D20.5;
-system {
- host-name vsrx1;
- root-authentication {
- encrypted-password "$1$gpbfk/Jr$lF2foqHYBd/Sp56dlmkXH1"; ## SECRET-DATA
- }
- name-server {
- 8.8.8.8;
- 8.8.4.4;
- }
- services {
- ssh;
- web-management {
- http {
- interface ge-0/0/0.0;
- }
- }
- }
- syslog {
- file messages {
- any any;
- }
- }
- license {
- autoupdate {
- url https://ae1.juniper.net/junos/key_retrieval;
- }
- }
- ntp {
- peer 46.243.48.4;
- peer 147.251.48.140;
- peer 46.243.48.88;
- }
-}
-interfaces {
- ge-0/0/0 {
- unit 0 {
- family inet {
- address {{ private_address }}/24;
- }
- }
- }
- ge-0/0/1 {
- unit 0 {
- family inet {
- address {{ public_address }}/24;
- }
- }
- }
- ge-0/0/2 {
- unit 0 {
- family inet {
- address {{ admin_address }};
- }
- }
- }
-}
-routing-options {
- route-distinguisher-id {{ private_address }};
- autonomous-system 64512;
- dynamic-tunnels {
- dynamic_overlay_tunnels {
- source-address {{ private_address }};
- gre;
- destination-networks {
- {{ private_network }}/24;
- }
- }
- }
-}
-protocols {
- mpls {
- interface all;
- }
- bgp {
- group Contrail_Controller {
- type internal;
- local-address 172.16.10.90;
- keep all;
- family inet-vpn {
- unicast;
- }
- allow {{ private_network }}/24;
- }
- }
-}
-security {
- forwarding-options {
- family {
- mpls {
- mode packet-based;
- }
- }
- }
-}
-routing-instances {
- public {
- instance-type vrf;
- interface ge-0/0/1.0;
- vrf-target target:64512:10000;
- vrf-table-label;
- routing-options {
- static {
- route {{ public_network }} discard;
- route 0.0.0.0/0 next-hop {{ public_network_gateway }};
- }
- }
- }
-}
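This .template variant replaces the literal addresses of juniper.conf with Jinja variables (private_address, public_address, admin_address, private_network, public_network, public_network_gateway). A minimal render sketch using the jinja2 library; the sample values below are assumptions for illustration only:

    from jinja2 import Template

    snippet = Template(
        'local-address {{ private_address }};\n'
        'route {{ public_network }} discard;\n'
        'route 0.0.0.0/0 next-hop {{ public_network_gateway }};\n')

    print(snippet.render(private_address='172.16.10.90',
                         public_network='192.168.10.0/24',
                         public_network_gateway='192.168.10.1'))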
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/k8s.yaml
deleted file mode 100644
index 5b35289..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/k8s.yaml
+++ /dev/null
@@ -1,137 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Install etcd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' state.sls etcd.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the etcd health
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- # Opencontrail Control Plane
-
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail db on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail control on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Wake up vhost0
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-;
- nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "' && sleep 30
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Wait for salt-minions to wake up after restart
- cmd: salt --timeout=15 --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 25, delay: 30}
- skip_fail: false
-
-- description: Install Opencontrail client on computes
- cmd: sleep 15 && salt --timeout=60 --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes #2
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-# Kubernetes
-- description: Install Kubernetes Addons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install Kubernetes components
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' state.sls kubernetes.pool
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 60}
- skip_fail: false
-
-- description: Run Kubernetes master without setup
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: true
-
-- description: Run Kubernetes master setup
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Restart Kubelet
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' service.restart 'kubelet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Renew hosts file on the whole cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
deleted file mode 100644
index d0844bc..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-
-{% set ENABLE_COMPUTES_SELF_REGISTER = os_env('ENABLE_COMPUTES_SELF_REGISTER', '') %}
-# Different templates use this mechanism.
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=true) }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
-{{ SHARED.REGISTER_COMPUTE_NODES() }}
-{%- endif %}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
deleted file mode 100644
index a5b37a2..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
+++ /dev/null
@@ -1,249 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-contrail/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
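The MongoDB steps here and several steps below share a guard pattern: probe the target for a pillar first and apply the state only when it matches, so the step is a no-op on models without that service. A hedged Python sketch of the same guard via subprocess; the salt CLI calls mirror the ones in the step, while the stdout check is a simplification:

    import subprocess

    def pillar_matches(target, pillar):
        # Equivalent of: salt -C <target> match.pillar '<pillar>'
        probe = subprocess.run(['salt', '-C', target, 'match.pillar', pillar],
                               capture_output=True, text=True)
        return probe.returncode == 0 and 'True' in probe.stdout

    if pillar_matches('I@mongodb:server', 'mongodb:server'):
        subprocess.run(['salt', '-C', 'I@mongodb:server',
                        'state.sls', 'mongodb.server'], check=True)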
-# Create MongoDB cluster
-- description: Create Mongo cluster if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influx db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS relay service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-###
-# From pipeline-library:
-# if (!common.checkContains('STACK_INSTALL', 'k8s')) {
-# salt.enforceState(master, 'I@docker:swarm and I@prometheus:server', 'heka.remote_collector', true, false)
-# }
-
-#- description: Configure Remote Collector in Docker Swarm for Openstack deployments
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-###
-
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4f140a0..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifdown ens4
- - sudo ifdown ens5
- - sudo ifup ens4
- - sudo ifup ens5
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
deleted file mode 100644
index 2a41ee3..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup eth1
- - sudo ifup eth2
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
- auto eth1
- iface eth1 inet dhcp
- auto eth2
- iface eth2 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
deleted file mode 100644
index 5fc02ce..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
deleted file mode 100644
index 33b68d7..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
+++ /dev/null
@@ -1,581 +0,0 @@
-# This environment requires 50.5 GB of RAM and 270 GB of storage. Run with caution.
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml' as CLOUDINIT_USER_DATA_1404 with context %}
-
----
-aliases:
-# The e1000 interface model cannot handle multicast traffic, so keepalived
-# does not work correctly with it. The virtio model should be used in any
-# contrail model.
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1404 {{ CLOUDINIT_USER_DATA_1404 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-k8s-contrail') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW01 = os_env('HOSTNAME_NTW01', 'ntw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW02 = os_env('HOSTNAME_NTW02', 'ntw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW03 = os_env('HOSTNAME_NTW03', 'ntw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_VSRX01 = os_env('HOSTNAME_VSRX01', 'vsrx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
-
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '172.16.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_NTW01 }}: +110
- default_{{ HOSTNAME_NTW02 }}: +111
- default_{{ HOSTNAME_NTW03 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_VSRX01 }}: +90
- ip_ranges:
- dhcp: [+90, -10]
-
- public-pool01:
- net: {{ os_env('PUBLIC_ADDRESS_POOL01', '192.168.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_NTW01 }}: +110
- default_{{ HOSTNAME_NTW02 }}: +111
- default_{{ HOSTNAME_NTW03 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_VSRX01 }}: +90
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_NTW01 }}: +110
- default_{{ HOSTNAME_NTW02 }}: +111
- default_{{ HOSTNAME_NTW03 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_VSRX01 }}: +90
- ip_ranges:
- dhcp: [+10, -10]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: True
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- private: private-pool01
- public: public-pool01
- admin: admin-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: true
-
- public:
- address_pool: public-pool01
- dhcp: true
- forward:
- mode: nat
-
- admin:
- address_pool: admin-pool01
- dhcp: true
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- - name: cloudimage1404
- source_image: !os_env IMAGE_PATH1404
- format: qcow2
-
- - name: vsrx_image
- source_image: !os_env IMAGE_VSRX
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: public
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: admin
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - public
- ens4:
- networks:
- - private
- ens5:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CTL01 }}
- role: k8s_controller
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: public
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: admin
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - public
- ens4:
- networks:
- - private
- ens5:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_NTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_NTW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_NTW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 1024
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_VSRX01 }}
- role: vsrx
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 512
- boot:
- - hd
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 10
- backing_store: vsrx_image
- format: qcow2
- #- name: iso
- #  capacity: 1
- #  format: raw
- #  device: cdrom
- #  bus: ide
- #  cloudinit_user_data: !include juniper.conf
-
- interfaces:
- - label: ge-0/0/0
- l2_network_device: private
- interface_model: *interface_model
- mac_address: 52:54:00:4e:b4:36
- - label: ge-0/0/1
- l2_network_device: public
- interface_model: *interface_model
- mac_address: 52:54:00:e1:44:9d
- - label: ge-0/0/2
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: 52:54:00:72:08:77
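
Throughout the underlay above, VM sizing is parameterized with the '!os_env NAME, default' tag. As a rough model of its semantics (an assumption about fuel-devops internals, not its actual code), the tag behaves like a custom PyYAML constructor that reads an environment variable and falls back to the inline default:

    import os
    import yaml  # PyYAML assumed installed

    def os_env_constructor(loader, node):
        # "!os_env SLAVE_NODE_CPU, 2" arrives as the scalar "SLAVE_NODE_CPU, 2"
        raw = loader.construct_scalar(node)
        name, _, default = (part.strip() for part in raw.partition(","))
        return os.environ.get(name, default)

    yaml.SafeLoader.add_constructor("!os_env", os_env_constructor)

    # Uses the default unless SLAVE_NODE_CPU is set; values come back as strings.
    print(yaml.safe_load("vcpu: !os_env SLAVE_NODE_CPU, 2"))  # {'vcpu': '2'}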
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml
deleted file mode 100644
index 2356475..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
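
Each step above targets minions through Salt compound matchers such as -C 'I@keepalived:cluster and *01*', i.e. a pillar-presence test combined with a glob on the minion id. A toy illustration of that shape (a simplification, not Salt's real matcher):

    import fnmatch

    def matches(minion_id, pillar, expr):
        # Handles only the "I@<path> and <glob>" shape used in these steps.
        pillar_part, glob_part = (p.strip() for p in expr.split(" and "))
        node = pillar
        for key in pillar_part[len("I@"):].split(":"):
            if not isinstance(node, dict) or key not in node:
                return False
            node = node[key]
        return fnmatch.fnmatch(minion_id, glob_part)

    pillar = {"keepalived": {"cluster": {"enabled": True}}}
    print(matches("ctl01.mcp.local", pillar, "I@keepalived:cluster and *01*"))  # True
    print(matches("ctl02.mcp.local", pillar, "I@keepalived:cluster and *01*"))  # False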
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
deleted file mode 100644
index afe3b21..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
+++ /dev/null
@@ -1,291 +0,0 @@
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- - description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- - description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- - description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- - description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
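
Every step carries 'retry: {count, delay}' and 'skip_fail' fields. A minimal sketch of how a runner could honor them (hypothetical code, not the actual tcp-qa executor):

    import subprocess
    import time

    def run_step(step):
        retry = step.get("retry", {})
        for _ in range(retry.get("count", 1)):
            if subprocess.run(step["cmd"], shell=True).returncode == 0:
                return
            time.sleep(retry.get("delay", 0))
        # A failed step only aborts the run when skip_fail is false.
        if not step.get("skip_fail", False):
            raise RuntimeError("Step failed: " + step["description"])

    run_step({
        "description": "Check haproxy status",
        "cmd": "echo salt -C 'I@haproxy:proxy' service.status haproxy",
        "retry": {"count": 1, "delay": 5},
        "skip_fail": False,
    })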
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
deleted file mode 100644
index 27999e1..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
- # See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
- {{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
- {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
-
-#
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
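
The Jinja2 headers above resolve defaults with os_env('NAME', 'default'), which presumably wraps os.environ.get; a minimal stand-in wired into Jinja2 (the helper name and wiring are assumptions, jinja2 assumed installed):

    import os
    from jinja2 import Environment

    def os_env(name, default=""):
        return os.environ.get(name, default)

    env = Environment()
    env.globals["os_env"] = os_env

    template = env.from_string(
        "{% set SUITE = os_env('REPOSITORY_SUITE', 'testing') %}suite={{ SUITE }}")
    print(template.render())  # suite=testing, unless REPOSITORY_SUITE is set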
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
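
The '{hostname}' fields look like plain Python str.format() placeholders that the framework substitutes per node (an assumption based on the single-brace syntax):

    meta = "instance-id: iid-local1\nhostname: {hostname}\nlocal-hostname: {hostname}\n"
    print(meta.format(hostname="cfg01.virtual-mcp11-ovs-dpdk.local"))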
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 600f6fb..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml
deleted file mode 100644
index 9852e2c..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc hugepages
-
- # Enable hugepages on the node
- - echo 2048 > /proc/sys/vm/nr_hugepages
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
deleted file mode 100644
index c396bcd..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
+++ /dev/null
@@ -1,432 +0,0 @@
- # Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-ovs-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-ovs-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
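
The '+N' and '-N' entries above are offsets into the allocated subnet: positive values count from the network address, negative values from its end, and the ':24' suffix in '10.60.0.0/16:24' reads as "carve /24 subnets out of the /16 pool" (fuel-devops semantics as understood here). The standard ipaddress module shows the arithmetic:

    import ipaddress

    net = ipaddress.ip_network("10.60.0.0/24")  # one /24 carved from the /16 pool
    print(net[1])    # '+1'   -> 10.60.0.1   (gateway / l2_network_device)
    print(net[101])  # '+101' -> 10.60.0.101 (e.g. default_ctl01)
    print(net[-10])  # '-10'  -> 10.60.0.246 (upper bound of dhcp: [+90, -10])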
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: True
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
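
The repetition across nodes is kept down with YAML anchors ('&interfaces', '&all_interfaces') and aliases ('*interfaces', '*all_interfaces'): the first node spells the structure out, later nodes reference it. A self-contained demonstration with PyYAML:

    import yaml

    doc = """
    nodes:
      - name: ctl01
        interfaces: &interfaces
          - {label: ens3, l2_network_device: admin}
          - {label: ens4, l2_network_device: private}
      - name: ctl02
        interfaces: *interfaces   # reuses the structure anchored on ctl01
    """

    data = yaml.safe_load(doc)
    # After loading, both nodes carry identical interface definitions.
    assert data["nodes"][0]["interfaces"] == data["nodes"][1]["interfaces"]
    print(data["nodes"][1]["interfaces"][0]["label"])  # ens3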
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml
deleted file mode 100644
index 7d13f72..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/openstack.yaml
deleted file mode 100644
index 66d7614..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/openstack.yaml
+++ /dev/null
@@ -1,256 +0,0 @@
-################### Install OpenStack control ##########################
-
-# // Install horizon dashboard
-# salt.enforceState(saltMaster, 'I@horizon:server', 'horizon', true)
-# salt.enforceState(saltMaster, 'I@nginx:server', 'nginx', true)
-
-- description: Install Horizon
- do: enforceState
- target: I@horizon:server
- state: horizon
-
-- description: Update certs on nginx servers
- do: enforceState
- target: I@nginx:server
- state: salt.minion.cert
-
-- description: Install nginx
- do: enforceState
- target: I@nginx:server
- state: nginx
-
-# // setup keystone service
-# //runSaltProcessStep(saltMaster, 'I@keystone:server', 'state.sls', ['keystone.server'], 1)
-# salt.enforceState(saltMaster, 'I@keystone:server and *01*', 'keystone.server', true)
-# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
-# // populate keystone services/tenants/roles/users
-
-- description: Install Keystone on 01
- do: enforceState
- target: I@keystone:server and *01*
- state: keystone.server
- retry: {count: 3, delay: 5}
-
-- description: Install Keystone
- do: enforceState
- target: I@keystone:server
- state: keystone.server
- retry: {count: 3, delay: 5}
-
-# // keystone:client must be called locally
-# //salt.runSaltProcessStep(saltMaster, 'I@keystone:client', 'cmd.run', ['salt-call state.sls keystone.client'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'service.restart', ['apache2'])
-# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
-# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; keystone service-list'], null, true)
-
-# - description: Install Keystone client
-# do: runState
-# target: I@keystone:client
-# state: cmd.run
-# args: ['salt-call state.sls keystone.client']
-
-- description: Restart apache on Keystone servers
- do: runState
- target: I@keystone:server
- state: service.restart
- args: ['apache2']
-
-- description: Install Keystone Client
- do: enforceState
- target: I@keystone:client
- state: keystone.client
-
-- description: Install Keystone Client
- do: enforceState
- target: I@keystone:client
- state: keystone.client
-
- - description: Show Keystone service list
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; keystone service-list']
-
-
-# // Install glance and ensure glusterfs clusters
-# //runSaltProcessStep(saltMaster, 'I@glance:server', 'state.sls', ['glance.server'], 1)
-# salt.enforceState(saltMaster, 'I@glance:server and *01*', 'glance.server', true)
-# salt.enforceState(saltMaster, 'I@glance:server', 'glance.server', true)
-# salt.enforceState(saltMaster, 'I@glance:server', 'glusterfs.client', true)
-
-
-- description: Install glance on 01
- do: enforceState
- target: I@glance:server and *01*
- state: glance.server
-
-- description: Install glance
- do: enforceState
- target: I@glance:server
- state: glance.server
-
-- description: Install gluster client on glance servers
- do: enforceState
- target: I@glance:server
- state: glusterfs.client
-
-# // Update fernet tokens before doing request on keystone server
-# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
-
-- description: Update fernet tokens
- do: enforceState
- target: I@keystone:server
- state: keystone.server
-
-# // Check glance service
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; glance image-list'], null, true)
-
-- description: Show glance images via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; glance image-list']
-
-# // Install and check nova service
-# //runSaltProcessStep(saltMaster, 'I@nova:controller', 'state.sls', ['nova'], 1)
-# salt.enforceState(saltMaster, 'I@nova:controller and *01*', 'nova.controller', true)
-# salt.enforceState(saltMaster, 'I@nova:controller', 'nova.controller', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova service-list'], null, true)
-
-- description: Install nova on controllers on 01
- do: enforceState
- target: I@nova:controller and *01*
- state: nova.controller
-
- - description: Install nova on controllers
- do: enforceState
- target: I@nova:controller
- state: nova.controller
-
-- description: Show nova services via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; nova service-list']
-
-
-
-# // Install and check cinder service
-# //runSaltProcessStep(saltMaster, 'I@cinder:controller', 'state.sls', ['cinder'], 1)
-# salt.enforceState(saltMaster, 'I@cinder:controller and *01*', 'cinder', true)
-# salt.enforceState(saltMaster, 'I@cinder:controller', 'cinder', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; cinder list'], null, true)
-
-
-- description: Install cinder on controllers on 01
- do: enforceState
- target: I@cinder:controller and *01*
- state: cinder
-
-- description: Install cinder on controllers
- do: enforceState
- target: I@cinder:controller
- state: cinder
-
-- description: Show cinder list via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; cinder list']
-
-
-# // Install neutron service
-# //runSaltProcessStep(saltMaster, 'I@neutron:server', 'state.sls', ['neutron'], 1)
-
-# salt.enforceState(saltMaster, 'I@neutron:server and *01*', 'neutron.server', true)
-# salt.enforceState(saltMaster, 'I@neutron:server', 'neutron.server', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron agent-list'], null, true)
-
-- description: Install neutron on controllers on 01
- do: enforceState
- target: I@neutron:server and *01*
- state: neutron.server
-
-- description: Install neutron on controllers
- do: enforceState
- target: I@neutron:server
- state: neutron.server
-
-- description: Show neutron agent list via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; neutron agent-list']
-
-# // Install heat service
-# //runSaltProcessStep(saltMaster, 'I@heat:server', 'state.sls', ['heat'], 1)
-# salt.enforceState(saltMaster, 'I@heat:server and *01*', 'heat', true)
-# salt.enforceState(saltMaster, 'I@heat:server', 'heat', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; heat resource-type-list'], null, true)
-
-- description: Install heat on controllers on 01
- do: enforceState
- target: I@heat:server and *01*
- state: heat
-
-- description: Install heat on controllers
- do: enforceState
- target: I@heat:server
- state: heat
-
-- description: Show heat resource type list via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; heat resource-type-list']
-
-# // Restart nova api
-# salt.runSaltProcessStep(saltMaster, 'I@nova:controller', 'service.restart', ['nova-api'])
-
-- description: Restart nova-api
- do: runState
- target: I@nova:controller
- state: service.restart
- args: ['nova-api']
-
-################### Install OpenStack network ##########################
-
-# // Apply gateway
-# salt.runSaltProcessStep(saltMaster, 'I@neutron:gateway', 'state.apply', [], null, true)
-
-- description: Apply gateway
- do: runState
- target: I@neutron:gateway
- state: state.apply
-
- # // Print information
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'], null, true)
-
-- description: Show neutron networks via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; neutron net-list']
-
-- description: Show nova networks via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; nova net-list']
-
-
-################### Install OpenStack compute ##########################
-
-# //orchestrate.installOpenstackMkCompute(saltMaster, physical)
-# // Configure compute nodes
-# retry(2) {
-# salt.runSaltProcessStep(saltMaster, 'I@nova:compute', 'state.apply', [], null, true)
-# }
-
-- description: Install Nova compute
- do: runState
- target: I@nova:compute
- state: state.apply
- retry: {count: 2, delay: 5}
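
This template uses a higher-level step schema ('do: enforceState' / 'do: runState' with 'target', 'state', 'args') instead of raw salt command lines. A hedged sketch of the mapping back to the salt CLI (function and field handling as assumed here, not the real orchestrator):

    def to_salt_cli(step):
        if step["do"] == "enforceState":
            return "salt -C '{}' state.sls {}".format(step["target"], step["state"])
        if step["do"] == "runState":
            args = " ".join(repr(a) for a in step.get("args", []))
            return "salt -C '{}' {} {}".format(step["target"], step["state"], args).strip()
        raise ValueError("unknown step type: " + step["do"])

    print(to_salt_cli({"do": "enforceState", "target": "I@horizon:server", "state": "horizon"}))
    print(to_salt_cli({"do": "runState", "target": "I@nova:controller",
                       "state": "service.restart", "args": ["nova-api"]}))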
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
deleted file mode 100644
index 46c02a0..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
- # See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
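
The salt.yaml templates above resolve every parameter through the Jinja helper os_env(name, default). Judging only from its usage here, it behaves like an environment-variable lookup with a fallback; a sketch under that assumption:

import os

def os_env(name, default=None):
    # Assumed semantics: return the environment variable if it is set,
    # otherwise fall back to the supplied default.
    return os.environ.get(name, default)

SALT_MODELS_REPOSITORY = os_env(
    'SALT_MODELS_REPOSITORY',
    'https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab')
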
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
deleted file mode 100644
index ecd79db..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data1604.yaml
deleted file mode 100644
index 29229d1..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
deleted file mode 100644
index 40ea763..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
+++ /dev/null
@@ -1,418 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
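
The address pools in this underlay use relative offsets: '10.60.0.0/16:24' allocates a /24 out of 10.60.0.0/16, and the +N/-N entries in ip_reserved and ip_ranges pick addresses inside it. A sketch of the assumed arithmetic (+N counting up from the network address, -N down from the broadcast), with a hypothetical allocated subnet:

import ipaddress

pool = ipaddress.ip_network('10.60.1.0/24')  # e.g. one /24 carved from 10.60.0.0/16

def offset(net, n):
    # +N counts up from the network address, -N down from the broadcast.
    base = net.network_address if n >= 0 else net.broadcast_address
    return base + n

print(offset(pool, +1))    # gateway / l2_network_device -> 10.60.1.1
print(offset(pool, +100))  # default cfg01 on private-pool01 -> 10.60.1.100
print(offset(pool, -10))   # upper bound of 'dhcp: [+90, -10]' -> 10.60.1.245
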
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/core.yaml b/tcp_tests/templates/virtual-mcp11-ovs/core.yaml
deleted file mode 100644
index 7daf069..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/core.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Set up glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
deleted file mode 100644
index f9dca1d..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
+++ /dev/null
@@ -1,360 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround, set enabled_backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround, set enabled_backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround, set enabled_backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
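
The three 'Temporary workaround' steps earlier in this file shell out to crudini to set enabled_backends = lvm in the [DEFAULT] section of /etc/cinder/cinder.conf. An equivalent sketch with Python's configparser (a standalone illustration; note that, unlike crudini, rewriting the file this way drops comments):

import configparser

CONF = '/etc/cinder/cinder.conf'

# interpolation=None because cinder.conf may contain '%' templates.
cfg = configparser.ConfigParser(interpolation=None)
cfg.read(CONF)
# [DEFAULT] is configparser's built-in defaults section, so it always exists.
cfg['DEFAULT']['enabled_backends'] = 'lvm'
with open(CONF, 'w') as f:
    cfg.write(f)
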
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
deleted file mode 100644
index cfa0272..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: WR run linux state to fix hosts
-# cmd: salt "cfg*" state.sls linux
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: true
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.94/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.95/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.96/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 600f6fb..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
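
The cfg01 user-data above assembles its apt sources from the REPOSITORY_SUITE and SALT_VERSION template variables; a sketch of the same expansion in Python (both values below are illustrative placeholders, not taken from this change):

REPOSITORY_SUITE = 'testing'  # e.g. 'nightly', 'testing', 'stable'
SALT_VERSION = '2017.7'       # illustrative placeholder only

sources = [
    f'deb [arch=amd64] http://mirror.mirantis.com/{REPOSITORY_SUITE}/ubuntu/ xenial main restricted universe',
    f'deb [arch=amd64] http://apt.mirantis.com/xenial {REPOSITORY_SUITE} salt extra',
    f'deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{SALT_VERSION} xenial main',
]
print('\n'.join(sources))
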
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index df91bee..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
deleted file mode 100644
index bd52ae0..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
+++ /dev/null
@@ -1,421 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +91
- default_{{ HOSTNAME_CTL02 }}: +92
- default_{{ HOSTNAME_CTL03 }}: +93
- default_{{ HOSTNAME_CMP01 }}: +95
- default_{{ HOSTNAME_CMP02 }}: +96
- default_{{ HOSTNAME_GTW01 }}: +94
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +91
- default_{{ HOSTNAME_CTL02 }}: +92
- default_{{ HOSTNAME_CTL03 }}: +93
- default_{{ HOSTNAME_CMP01 }}: +95
- default_{{ HOSTNAME_CMP02 }}: +96
- default_{{ HOSTNAME_GTW01 }}: +94
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
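
Note on the underlay templates above: they rely on YAML anchors to avoid repeating per-node blocks. The first controller defines `&interfaces`/`&network_config` (and the first compute defines `&all_interfaces`/`&all_network_config`); every later node reuses them via `*interfaces`, `*all_interfaces`, and so on. A minimal sketch of the mechanism, assuming PyYAML and illustrative node names:

    import yaml  # PyYAML

    doc = """
    nodes:
      - name: ctl01
        interfaces: &interfaces
          - {label: ens3, l2_network_device: admin}
          - {label: ens4, l2_network_device: private}
      - name: ctl02
        interfaces: *interfaces   # alias expands to the same list as ctl01
    """
    data = yaml.safe_load(doc)
    assert data["nodes"][0]["interfaces"] == data["nodes"][1]["interfaces"]
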
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index b63207a..21dc19d 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -73,7 +73,7 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila" "jenkins" "glusterfs"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila" "jenkins" "glusterfs" "neutron"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
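
The change above appends "neutron" to the formula list passed to MACRO_CONFIGURE_RECLASS. As a rough sketch of what that quoted list amounts to, assuming each entry maps to one salt-formula-<name> package on the master (an assumption for illustration; the macro itself lives in shared-salt.yaml):

    import shlex

    formula_services = '"linux" "salt" "neutron"'   # abbreviated form of the list above
    packages = ["salt-formula-" + name for name in shlex.split(formula_services)]
    print(packages)  # ['salt-formula-linux', 'salt-formula-salt', 'salt-formula-neutron']
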
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml
index 5a02d24..c5fc670 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
# Prepare network connection
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
@@ -70,3 +73,26 @@
ServerAliveCountMax 10
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
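
For reference, the image-layout.yml added above sizes each logical volume either absolutely or as a percentage of the volume group; growlvm.py consumes it after pvresize has grown the PV. A sketch of the size arithmetic only, assuming a 150 GiB VG to match the NODE_VOLUME_SIZE default used in these templates (growlvm.py itself is not reproduced here):

    import yaml

    layout = yaml.safe_load("""
    root:          {size: '30%VG'}
    home:          {size: '1G'}
    var_log:       {size: '11%VG'}
    var_log_audit: {size: '5G'}
    var_tmp:       {size: '11%VG'}
    tmp:           {size: '5G'}
    """)
    vg_gib = 150.0  # assumed VG size for illustration
    for lv, spec in layout.items():
        size = spec["size"]
        gib = vg_gib * float(size[:-3]) / 100 if size.endswith("%VG") else float(size[:-1])
        print(f"{lv}: {gib:.1f} GiB")
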
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
deleted file mode 100644
index 4d69c89..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-- description: remove apparmor
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
deleted file mode 100644
index ed312b8..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ /dev/null
@@ -1,231 +0,0 @@
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs') %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-- description: Install bind if the 'bind:server' pillar exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@bind:server' match.pillar 'bind:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@bind:server' state.sls bind;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@designate:server' state.sls designate -b 1
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as per the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image (done further below, after cinder is configured)
-# Configure cinder-volume via salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
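
The "Temporary WR" (workaround) steps above flip cinder to the LVM backend by editing /etc/cinder/cinder.conf with crudini. A rough Python equivalent of that single edit, for reference only (oslo-style configs are ini-like, so configparser is an approximation, not the project's tooling):

    import configparser

    path = "/etc/cinder/cinder.conf"
    conf = configparser.ConfigParser()
    conf.read(path)
    conf["DEFAULT"]["enabled_backends"] = "lvm"   # same key the crudini calls set above
    with open(path, "w") as f:
        conf.write(f)
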
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
deleted file mode 100755
index a33e90f..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-. /home/jenkins/fuel-devops30/bin/activate
-pip install -r ./tcp_tests/requirements.txt -U
-pip install psycopg2
-
-export ENV_NAME=virtual-offline-pike-ovs
-export VENV_PATH=/home/jenkins/fuel-devops30
-export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64.qcow2
-export SHUTDOWN_ENV_ON_TEARDOWN=false
-export PYTHONIOENCODING=UTF-8
-export LAB_CONFIG_NAME=virtual-offline-pike-ovs
-export CLUSTER_NAME=virtual-offline-pike-ovs
-export REPOSITORY_SUITE=2018.1
-export SALT_VERSION=2017.7
-export DISTRIB_CODENAME=xenial
-
-export TEST_GROUP=test_mcp_pike_ovs_install
-export RUN_TEMPEST=true
-
-# Offline deploy parameters
-export SALT_MODELS_REF_CHANGE=refs/changes/44/15144/1
-
-export BOOTSTRAP_TIMEOUT=1200
-
-export HOST_APT=10.170.0.226
-export HOST_SALTSTACK=10.170.0.226
-export HOST_ARCHIVE_UBUNTU=10.170.0.226
-export HOST_MIRROR_MCP_MIRANTIS=10.170.0.226
-export HOST_MIRROR_FUEL_INFRA=10.170.0.226
-export HOST_PPA_LAUNCHPAD=10.170.0.226
-
-export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
-export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-export SALT_REPOSITORY="deb [arch=amd64] http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/saltstack-${SALT_VERSION}/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
-#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
-
-cd tcp_tests
-py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
deleted file mode 100644
index 129ca71..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_APT01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_PRX01 with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-offline-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
-
-#- description: 'Generate nginx cert'
-# cmd: |
-# openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-# -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.gerrit.com" \
-# -keyout ssl-nginx.key -out ssl-nginx.crt;
-# node_name: {{ HOSTNAME_APT01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Check nginx APT node is ready
- cmd: systemctl status nginx;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check dnsmasq on APT node is ready
- cmd: systemctl status dnsmasq;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: 'Workaround for local_repo_url - point to the offline image repository structure'
- cmd: |
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/aptly_server_address: .*/aptly_server_address: {{ os_env('HOST_APT') }}/g' {} +
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* to remove the apt key until the migration to CC'
- cmd: salt-key -d apt01.virtual-offline-pike-ovs-dpdk -y
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
-- description: '*Workaround* stop the minion on the apt node acting as a proxy'
- cmd: systemctl stop salt-minion.service
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
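
The local_repo_url workaround above rewrites every file under the cluster classes with sed. An equivalent sketch in Python, using the same path and the same substitutions (the HOST_APT value is illustrative):

    import re
    from pathlib import Path

    root = Path("/srv/salt/reclass/classes/cluster")
    host_apt = "10.170.0.226"   # illustrative HOST_APT value
    for f in (p for p in root.rglob("*") if p.is_file()):
        text = f.read_text()
        text = re.sub(r"local_repo_url: .*", "local_repo_url: mirror.mcp.mirantis.local.test", text)
        text = re.sub(r"aptly_server_address: .*", "aptly_server_address: " + host_apt, text)
        f.write_text(text)
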
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--meta-data.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
deleted file mode 100644
index fe2c8f3..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
- - sudo ifup ens6
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- - export TERM=linux
- - export LANG=C
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## Cloud repo01 node ##################
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree ntp;
- - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key -out /root/ssl-nginx.crt;
- - cd /tmp;
- - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
- - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
- - git clone https://github.com/TatyankaLeontovich/underpillar;
- - mkdir -p /srv/pillar/;
- - mkdir -p /srv/salt;
- - cd /srv/salt;
- - ln -s /tmp/salt-formula-nginx/nginx;
- - ln -s /tmp/salt-dnsmasq/dnsmasq;
- - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
- - cp /tmp/underpillar/states/*.sls /srv/salt/;
- - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
- - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
- - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- - salt-call --local --state-output=mixed state.sls dnsmasq;
- - salt-call --local --state-output=mixed state.sls nginx;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
- auto ens6
- iface ens6 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 800a0b1..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
-
- #- sudo route add default gw {gateway} {interface_name}
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 5a02d24..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
deleted file mode 100644
index 011bb72..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
+++ /dev/null
@@ -1,474 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-offline-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-offline-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-offline-pike-ovs/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
-
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-offline-pike-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-offline-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +122
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +122
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
- group_volumes:
-     - name: cloudimage1604  # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-     - name: apt_cloudimage1604  # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env APT_IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_APT01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: apt_cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_apt01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 8
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                         # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
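
A note on the address_pools blocks in the deleted underlay above: ip_reserved/ip_ranges entries such as '+122' and '-10' are offsets into the subnet that fuel-devops carves out of the pool. A sketch of the arithmetic, assuming positive offsets count from the subnet base and negative ones from its end:

    import ipaddress

    net = ipaddress.ip_network("10.70.0.0/24")   # example /24 carved from ADMIN_ADDRESS_POOL01
    print(net.network_address + 122)   # '+122' -> 10.70.0.122 (apt01 / gateway)
    print(net.network_address + 90)    # '+90'  -> 10.70.0.90  (cfg01, dhcp range start)
    print(net.broadcast_address - 10)  # '-10'  -> 10.70.0.245 (dhcp range end)
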
diff --git a/tcp_tests/templates/virtual-offline-ssl/salt.yaml b/tcp_tests/templates/virtual-offline-ssl/salt.yaml
index 9898bec..46ab1e4 100644
--- a/tcp_tests/templates/virtual-offline-ssl/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/salt.yaml
@@ -70,7 +70,7 @@
retry: {count: 1, delay: 1}
skip_fail: true
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "dogtag" "runtest" "manila" "auditd" "logrotate" "gnocchi"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "dogtag" "runtest" "manila" "auditd" "logrotate" "gnocchi" "neutron"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml
index 5a02d24..c5fc670 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml
@@ -27,6 +27,9 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
# Prepare network connection
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
@@ -70,3 +73,26 @@
ServerAliveCountMax 10
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
+
+ growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
deleted file mode 100644
index 81d958d..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ /dev/null
@@ -1,156 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: virtual-pike-ovs-dpdk.local
- cluster_name: virtual-pike-ovs-dpdk
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: ${_param:openstack_control_node01_address}
-# infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: ${_param:openstack_control_node01_hostname}
- infra_kvm02_control_address: ${_param:openstack_control_node02_address}
-# infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: ${_param:openstack_control_node02_hostname}
- infra_kvm03_control_address: ${_param:openstack_control_node03_address}
-# infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: ${_param:openstack_control_node03_hostname}
- infra_kvm_vip_address: ${_param:openstack_control_address}
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 10.167.4.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_hugepages_count: '600'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_cpu_pinning: '3'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_branch: master
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
deleted file mode 100644
index a5640d1..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
+++ /dev/null
@@ -1,155 +0,0 @@
-nodes:
- cfg01.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs01.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs02.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs03.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg01.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg02.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg03.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.virtual-pike-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
deleted file mode 100644
index 5da2666..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- # Start compute node addresses from .105, as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
deleted file mode 100644
index 11a7665..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
deleted file mode 100644
index 3aa53f9..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-pike-ovs-dpdk') %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
deleted file mode 100644
index bcbcd75..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate" "jenkins"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-- description: Enable hugepages on cmp nodes
- cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
deleted file mode 100644
index 25cfbc4..0000000
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
+++ /dev/null
@@ -1,628 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-pike-ovs-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-pike-ovs-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-pike-ovs-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-pike-ovs-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: True
- use_hugepages: True
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/tests/system/test_3rdparty_suites.py b/tcp_tests/tests/system/test_3rdparty_suites.py
index 0abb89b..00b915d 100644
--- a/tcp_tests/tests/system/test_3rdparty_suites.py
+++ b/tcp_tests/tests/system/test_3rdparty_suites.py
@@ -69,30 +69,28 @@
'stacklight_report.xml')
@pytest.mark.grab_versions
- @pytest.mark.extract(container_system='docker', extract_from='conformance',
- files_to_extract=['report'])
- @pytest.mark.merge_xunit(path='/root/report',
+ @pytest.mark.prepare_log(filepath='/tmp/conformance/conformance.log')
+ @pytest.mark.merge_xunit(path='/tmp/conformance',
output='/root/conformance_result.xml')
- @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
- 'conformance_result.xml'])
+ @pytest.mark.download(name=['conformance.log',
+ 'conformance_result.xml'])
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.k8s_conformance
def test_run_k8s_conformance(self, show_step, config, k8s_actions,
- k8s_logs, _):
+ conformance_helper, _):
"""Test run of k8s conformance tests"""
- k8s_actions.run_conformance()
+ k8s_actions.start_conformance_inside_pod()
@pytest.mark.grab_versions
- @pytest.mark.extract(container_system='docker',
- extract_from='mirantis/virtlet',
- files_to_extract=['conformance_virtlet_result.xml'])
- @pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
- 'conformance_virtlet_result.xml'])
+ @pytest.mark.prepare_log(filepath='/tmp/virtlet-conformance/'
+ 'virtlet_conformance.log')
+ @pytest.mark.merge_xunit(path='/tmp/virtlet-conformance',
+ output='/root/conformance_virtlet_result.xml')
+ @pytest.mark.download(name=['virtlet_conformance.log',
+ 'conformance_virtlet_result.xml'])
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.k8s_conformance_virtlet
def test_run_k8s_conformance_virtlet(self, show_step, config, k8s_actions,
- k8s_logs, _):
+ conformance_helper, _):
"""Test run of k8s virtlet conformance tests"""
- config.k8s.run_extended_virtlet_conformance = True
- k8s_actions.run_virtlet_conformance(
- report_name="conformance_virtlet_result.xml")
+ k8s_actions.start_conformance_inside_pod(cnf_type='virtlet')
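
Note on the new conformance flow: the suite now runs inside a k8s pod via
start_conformance_inside_pod(), and the artifacts are post-processed on the
host through the prepare_log, merge_xunit and download marks. A minimal
sketch of the resulting test shape, assuming the marks are consumed by the
conformance_helper fixture exactly as in the tests above (mark semantics
are inferred from their arguments):

import pytest

from tcp_tests import settings


class TestConformanceSketch(object):
    # Assumed semantics: prepare_log stages the pod log at the given path,
    # merge_xunit folds the per-suite xunit files into a single report, and
    # download fetches the named artifacts from the environment.
    @pytest.mark.prepare_log(filepath='/tmp/conformance/conformance.log')
    @pytest.mark.merge_xunit(path='/tmp/conformance',
                             output='/root/conformance_result.xml')
    @pytest.mark.download(name=['conformance.log',
                                'conformance_result.xml'])
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    def test_conformance_sketch(self, k8s_actions, conformance_helper, _):
        """Sketch only: the fixtures come from the tcp_tests plugins."""
        k8s_actions.start_conformance_inside_pod()
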
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
new file mode 100644
index 0000000..5d2e060
--- /dev/null
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -0,0 +1,149 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+from tcp_tests.utils import run_jenkins_job
+from tcp_tests.utils import get_jenkins_job_stages
+
+LOG = logger.logger
+
+
+class TestCvpPipelines(object):
+ """Test class for running Cloud Validation Pipelines
+
+ Requires environment variables:
+ ENV_NAME
+ LAB_CONFIG_NAME
+ TESTS_CONFIGS
+ """
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_cvp_func_smoke
+ def test_run_cvp_func_smoke(self, salt_actions, show_step, _):
+ """Runner for Pipeline CVP - Functional tests
+
+ Scenario:
+ 1. Get CICD Jenkins access credentials from salt
+ 2. Run job cvp-func
+ 3. Get passed stages from cvp-func
+ """
+ salt = salt_actions
+ show_step(1)
+
+ tgt = 'I@docker:client:stack:jenkins and cid01*'
+ jenkins_host = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:host")
+ jenkins_port = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:port")
+ jenkins_url = 'http://{0}:{1}'.format(jenkins_host, jenkins_port)
+ jenkins_user = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:username")
+ jenkins_pass = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:password")
+ jenkins_start_timeout = 60
+ jenkins_build_timeout = 1800
+
+ job_name = 'cvp-func'
+ job_parameters = {
+ 'TARGET_NODE': 'gtw01*',
+ 'TEMPEST_ENDPOINT_TYPE': 'internalURL',
+ 'TEMPEST_TEST_PATTERN': 'set=smoke',
+ }
+ show_step(2)
+ cvp_func_smoke_result = run_jenkins_job.run_job(
+ host=jenkins_url,
+ username=jenkins_user,
+ password=jenkins_pass,
+ start_timeout=jenkins_start_timeout,
+ build_timeout=jenkins_build_timeout,
+ verbose=True,
+ job_name=job_name,
+ job_parameters=job_parameters,
+ job_output_prefix='[ cvp-func/{build_number}:platform {time} ] ')
+
+ show_step(3)
+ (description, stages) = get_jenkins_job_stages.get_deployment_result(
+ host=jenkins_url,
+ username=jenkins_user,
+ password=jenkins_pass,
+ job_name=job_name,
+ build_number='lastBuild')
+
+ LOG.info(description)
+ LOG.info('\n'.join(stages))
+
+ assert cvp_func_smoke_result == 'SUCCESS', "{0}\n{1}".format(
+ description, '\n'.join(stages))
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_cvp_func_sanity
+ def test_run_cvp_func_sanity(self, salt_actions, show_step, _):
+ """Runner for Pipeline CVP - Functional tests
+
+ Scenario:
+ 1. Get CICD Jenkins access credentials from salt
+ 2. Run job cvp-sanity
+ 3. Get passed stages from cvp-sanity
+ """
+ salt = salt_actions
+ show_step(1)
+
+ tgt = 'I@docker:client:stack:jenkins and cid01*'
+ jenkins_host = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:host")
+ jenkins_port = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:port")
+ jenkins_url = 'http://{0}:{1}'.format(jenkins_host, jenkins_port)
+ jenkins_user = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:username")
+ jenkins_pass = salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:password")
+ jenkins_start_timeout = 60
+ jenkins_build_timeout = 1800
+
+ job_name = 'cvp-sanity'
+ job_parameters = {
+ 'TEST_SET': '/var/lib/cvp-sanity/cvp_checks/tests/',
+ }
+
+ show_step(2)
+ cvp_func_sanity_result = run_jenkins_job.run_job(
+ host=jenkins_url,
+ username=jenkins_user,
+ password=jenkins_pass,
+ start_timeout=jenkins_start_timeout,
+ build_timeout=jenkins_build_timeout,
+ verbose=True,
+ job_name=job_name,
+ job_parameters=job_parameters,
+ job_output_prefix='[ cvp-sanity/{build_number}:platform {time} ] ')
+
+ show_step(3)
+ (description, stages) = get_jenkins_job_stages.get_deployment_result(
+ host=jenkins_url,
+ username=jenkins_user,
+ password=jenkins_pass,
+ job_name=job_name,
+ build_number='lastBuild')
+
+ LOG.info(description)
+ LOG.info('\n'.join(stages))
+
+ assert cvp_func_sanity_result == 'SUCCESS', "{0}\n{1}".format(
+ description, '\n'.join(stages))
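
Both CVP tests above repeat the same four pillar lookups to reach the CICD
Jenkins. A sketch of a helper that could consolidate them; the helper name
and return layout are illustrative, only the target and pillar paths come
from the tests themselves:

def get_jenkins_access(salt_actions,
                       tgt='I@docker:client:stack:jenkins and cid01*'):
    """Illustrative helper: collect Jenkins master access data from pillar."""
    def _pillar(key):
        return salt_actions.get_single_pillar(
            tgt=tgt, pillar='jenkins:client:master:{0}'.format(key))

    host = _pillar('host')
    port = _pillar('port')
    return {
        'url': 'http://{0}:{1}'.format(host, port),
        'username': _pillar('username'),
        'password': _pillar('password'),
    }
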
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index 67d3635..b9621ab 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -186,18 +186,16 @@
k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
- @pytest.mark.extract(container_system='docker', extract_from='conformance',
- files_to_extract=['report'])
- @pytest.mark.merge_xunit(path='/root/report',
+ @pytest.mark.prepare_log(filepath='/tmp/conformance/conformance.log')
+ @pytest.mark.merge_xunit(path='/tmp/conformance',
output='/root/conformance_result.xml')
- @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
- 'conformance_result.xml'])
+ @pytest.mark.download(name=['conformance.log',
+ 'conformance_result.xml'])
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- @pytest.mark.cz8116
- @pytest.mark.k8s_calico
+ @pytest.mark.k8s_calico_only_k8s
def test_only_k8s_install(self, config, show_step,
- k8s_deployed, k8s_logs):
+ k8s_deployed, conformance_helper):
"""Test for deploying MCP environment with k8s and check it
Scenario:
@@ -211,5 +209,5 @@
if config.k8s.k8s_conformance_run:
show_step(5)
- k8s_deployed.run_conformance()
+ k8s_deployed.start_conformance_inside_pod()
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 1077c2f..d10d250 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -399,7 +399,8 @@
openstack_actions.install(commands)
if settings.RUN_TEMPEST:
- tempest_actions.prepare_and_run_tempest()
+ tempest_actions.prepare_and_run_tempest(
+ store_run_test_model=False)
LOG.info("*************** DONE **************")
@pytest.mark.fail_snapshot
@@ -502,10 +503,32 @@
name = 'rally-tempest-net-features:latest'
if settings.RUN_TEMPEST:
- openstack_actions.run_tempest(
+ openstack_actions.prepare_and_run_tempest(
pattern=settings.PATTERN,
conf_name='net_features.conf',
- registry='{0}/{1}'.format(registry, name)
+ registry='{0}/{1}'.format(registry, name),
+ store_run_test_model=False
)
openstack_actions.download_tempest_report()
LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_offline_barbican_install(self, underlay,
+ openstack_deployed,
+ openstack_actions,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest(store_run_test_model=False)
+ LOG.info("*************** DONE **************")
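
These scenarios now pass store_run_test_model=False into
prepare_and_run_tempest. Only the keyword name comes from this change; a
hypothetical stub of the runner side, to show where such a flag would plug
in (the defaults, method names and body are assumptions):

def prepare_and_run_tempest(self, pattern='set=smoke', conf_name=None,
                            registry=None, store_run_test_model=True):
    """Hypothetical stub; only 'store_run_test_model' is from this change."""
    self.prepare()                   # assumed: render config, pull the image
    if store_run_test_model:
        self.store_run_test_model()  # assumed: persist the run-test model
    self.run_tempest(pattern=pattern, conf_name=conf_name, registry=registry)
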
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 6467a8a..d12fffc 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -122,7 +122,7 @@
assert sample.is_service_available()
show_step(6)
- k8s_deployed.run_conformance(log_out="k8s_conformance.log")
+ k8s_deployed.start_conformance_inside_pod()
show_step(7)
chain_versions = config.k8s.k8s_update_chain.split(" ")
@@ -134,8 +134,7 @@
assert sample.is_service_available()
LOG.info("Running conformance on {} version".format(version))
- log_name = "k8s_conformance_{}.log".format(version)
- k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
+ k8s_deployed.start_conformance_inside_pod()
assert sample.is_service_available()
@@ -186,7 +185,7 @@
assert sample.is_service_available(external=True)
show_step(6)
- k8s_deployed.run_conformance()
+ k8s_deployed.start_conformance_inside_pod()
show_step(7)
for sample in samples:
@@ -305,7 +304,7 @@
check_pods_availability()
show_step(12)
- k8s_deployed.run_conformance()
+ k8s_deployed.start_conformance_inside_pod()
show_step(13)
check_pods_availability()
diff --git a/tcp_tests/tests/system/test_ovs_pike_ceph.py b/tcp_tests/tests/system/test_ovs_pike_ceph.py
deleted file mode 100644
index 1f62a94..0000000
--- a/tcp_tests/tests/system/test_ovs_pike_ceph.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2018 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pytest
-
-from tcp_tests import logger
-from tcp_tests import settings
-
-LOG = logger.logger
-
-
-@pytest.mark.deploy
-class TestInstallOvsPikeCeph(object):
- """Test class for test openstack with ceph and ovs deploy"""
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_pike_ceph_all_ovs_install(self, underlay,
- openstack_deployed,
- ceph_deployed,
- openstack_actions,
- tempest_actions):
- """Test for deploying pike ovs with ceph and check it
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Setup ceph RBD, replication factor 2 for cinder, nova, glance
- 5. Run tempest
-
- """
- openstack_actions._salt.local(
- tgt='*', fun='cmd.run',
- args='service ntp stop; ntpd -gq; service ntp start')
-
- if settings.RUN_TEMPEST:
- tempest_actions.prepare_and_run_tempest()
- LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index d3b6c27..9ca2fe6 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -88,12 +88,16 @@
show_step(4)
vm_pod.delete()
+ @pytest.mark.prepare_log(filepath='/tmp/virtlet-conformance/'
+ 'virtlet_conformance.log')
+ @pytest.mark.merge_xunit(path='/tmp/virtlet-conformance',
+ output='/root/report.xml')
+ @pytest.mark.download(name=['virtlet_conformance.log',
+ 'report.xml'])
@pytest.mark.grab_versions
- @pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
- 'report.xml'])
@pytest.mark.fail_snapshot
def test_virtlet_conformance(self, show_step, config, k8s_deployed,
- k8s_logs):
+ conformance_helper):
"""Test run of virtlet conformance tests
Scenario:
@@ -102,4 +106,4 @@
"""
show_step(1)
- k8s_deployed.run_virtlet_conformance()
+ k8s_deployed.start_conformance_inside_pod(cnf_type='virtlet')
diff --git a/tcp_tests/utils/env_jenkins_cicd b/tcp_tests/utils/env_jenkins_cicd
index 04cdd56..72a8547 100755
--- a/tcp_tests/utils/env_jenkins_cicd
+++ b/tcp_tests/utils/env_jenkins_cicd
@@ -22,12 +22,12 @@
unset JENKINS_START_TIMEOUT
unset JENKINS_BUILD_TIMEOUT
else
- MASTER="get_param.py -C I@docker:client:stack:jenkins pillar.get jenkins:client:master"
- export JENKINS_HOST=$(${CURRENT_DIR}/${MASTER}:host)
- export JENKINS_PORT=$(${CURRENT_DIR}/${MASTER}:port)
+ MASTER_TARGET='I@docker:client:stack:jenkins and cid01*'
+ export JENKINS_HOST=$(${CURRENT_DIR}/get_param.py -C "${MASTER_TARGET}" pillar.get jenkins:client:master:host)
+ export JENKINS_PORT=$(${CURRENT_DIR}/get_param.py -C "${MASTER_TARGET}" pillar.get jenkins:client:master:port)
export JENKINS_URL=http://${JENKINS_HOST}:${JENKINS_PORT}
- export JENKINS_USER=$(${CURRENT_DIR}/${MASTER}:username)
- export JENKINS_PASS=$(${CURRENT_DIR}/${MASTER}:password)
+ export JENKINS_USER=$(${CURRENT_DIR}/get_param.py -C "${MASTER_TARGET}" pillar.get jenkins:client:master:username)
+ export JENKINS_PASS=$(${CURRENT_DIR}/get_param.py -C "${MASTER_TARGET}" pillar.get jenkins:client:master:password)
export JENKINS_START_TIMEOUT=60
export JENKINS_BUILD_TIMEOUT=1800
fi
diff --git a/tcp_tests/utils/get_jenkins_job_stages.py b/tcp_tests/utils/get_jenkins_job_stages.py
index 883494f..b28e1d2 100755
--- a/tcp_tests/utils/get_jenkins_job_stages.py
+++ b/tcp_tests/utils/get_jenkins_job_stages.py
@@ -69,15 +69,15 @@
return parser
-def get_deployment_result(opts):
+def get_deployment_result(host, username, password, job_name, build_number):
"""Get the pipeline job result from Jenkins
Get all the stage results from the specified job,
show the error message if present.
"""
- jenkins = client.JenkinsClient(host=opts.host,
- username=opts.username,
- password=opts.password)
+ jenkins = client.JenkinsClient(host=host,
+ username=username,
+ password=password)
def get_stages(nodes, indent=0, show_status=True):
res = []
@@ -90,15 +90,15 @@
res.append(msg)
if node['status'] != 'SUCCESS':
- wf = jenkins.get_workflow(opts.job_name, opts.build_number,
+ wf = jenkins.get_workflow(job_name, build_number,
int(node['id']))
if wf is not None:
if 'stageFlowNodes' in wf:
res += get_stages(wf['stageFlowNodes'], indent + 2,
show_status=False)
elif '_links' in wf and 'log' in wf['_links']:
- log = jenkins.get_workflow(opts.job_name,
- opts.build_number,
+ log = jenkins.get_workflow(job_name,
+ build_number,
int(node['id']),
mode='log')
if "text" in log:
@@ -109,8 +109,8 @@
return res
for _ in range(3):
- wf = jenkins.get_workflow(opts.job_name, opts.build_number)
- info = jenkins.build_info(opts.job_name, int(wf['id']))
+ wf = jenkins.get_workflow(job_name, build_number)
+ info = jenkins.build_info(job_name, int(wf['id']))
if info.get('result'):
break
time.sleep(3)
@@ -120,7 +120,7 @@
stages = get_stages(wf['stages'], 0)
if not stages:
msg = wf['status'] + ":\n\n"
- stages = [msg + jenkins.get_build_output(opts.job_name, int(wf['id']))]
+ stages = [msg + jenkins.get_build_output(job_name, int(wf['id']))]
return (build_description, stages)
@@ -133,7 +133,12 @@
parser.print_help()
return 10
else:
- (build_description, stages) = get_deployment_result(opts)
+ (build_description, stages) = get_deployment_result(
+ opts.host,
+ opts.username,
+ opts.password,
+ opts.job_name,
+ opts.build_number)
print(build_description)
print('\n'.join(stages))
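
After this refactoring, get_deployment_result can be imported and called
with explicit arguments instead of an argparse namespace. A short usage
sketch matching the call in test_cvp_pipelines.py; the credential values
are placeholders (the tests read them from pillar data):

from tcp_tests.utils import get_jenkins_job_stages

(description, stages) = get_jenkins_job_stages.get_deployment_result(
    host='http://10.0.0.1:8081',  # placeholder Jenkins URL
    username='admin',             # placeholder
    password='secret',            # placeholder
    job_name='cvp-func',
    build_number='lastBuild')
print(description)
print('\n'.join(stages))
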
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index acc2e9f..e0e7f69 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -97,10 +97,10 @@
return parser
-def print_build_header(build, job_params, opts):
+def print_build_header(build, job_params, build_timeout):
print('\n#############################################################')
print('##### Building job [{0}] #{1} (timeout={2}) with the following '
- 'parameters:'.format(build[0], build[1], opts.build_timeout))
+ 'parameters:'.format(build[0], build[1], build_timeout))
print('##### ' + '\n##### '.join(
[str(key) + ": " + str(val) for key, val in job_params.iteritems()]
))
@@ -114,41 +114,44 @@
print('#############################################################\n')
-def run_job(opts):
+def run_job(host, username, password,
+ job_name, job_parameters=None, job_output_prefix='',
+ start_timeout=1800, build_timeout=3600 * 4, verbose=False):
jenkins = JenkinsClient(
- host=opts.host,
- username=opts.username,
- password=opts.password)
+ host=host,
+ username=username,
+ password=password)
- job_params = jenkins.make_defults_params(opts.job_name)
- job_params.update(opts.job_parameters)
+ job_params = jenkins.make_defults_params(job_name)
+ if job_parameters is not None:  # skip the update when no parameter overrides are given
+ job_params.update(job_parameters)
- build = jenkins.run_build(opts.job_name,
+ build = jenkins.run_build(job_name,
job_params,
- verbose=opts.verbose,
- timeout=opts.start_timeout)
- if opts.verbose:
- print_build_header(build, job_params, opts)
+ verbose=verbose,
+ timeout=start_timeout)
+ if verbose:
+ print_build_header(build, job_params, build_timeout)
try:
jenkins.wait_end_of_build(
name=build[0],
build_id=build[1],
- timeout=opts.build_timeout,
+ timeout=build_timeout,
interval=1,
- verbose=opts.verbose,
- job_output_prefix=opts.job_output_prefix)
+ verbose=verbose,
+ job_output_prefix=job_output_prefix)
except Exception as e:
print(str(e))
raise
result = jenkins.build_info(name=build[0],
build_id=build[1])['result']
- if opts.verbose:
- print_build_footer(build, result, opts.host)
+ if verbose:
+ print_build_footer(build, result, host)
- return EXIT_CODES.get(result, 2)
+ return result
def main(args=None):
@@ -160,8 +163,17 @@
parser.print_help()
return 10
else:
- exit_code = run_job(opts)
- return exit_code
+ result = run_job(
+ opts.host,
+ opts.username,
+ opts.password,
+ opts.job_name,
+ opts.job_parameters,
+ opts.job_output_prefix,
+ opts.start_timeout,
+ opts.build_timeout,
+ opts.verbose)
+ return EXIT_CODES.get(result, 2)
if __name__ == "__main__":
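
run_job now returns the raw Jenkins result string, so library callers can
assert on it directly while the CLI keeps its exit-code contract through
EXIT_CODES. A minimal programmatic sketch; the credentials are placeholders
and the job parameters are taken from test_cvp_pipelines.py:

from tcp_tests.utils import run_jenkins_job

result = run_jenkins_job.run_job(
    host='http://10.0.0.1:8081',  # placeholder, normally read from pillar
    username='admin',             # placeholder
    password='secret',            # placeholder
    job_name='cvp-sanity',
    job_parameters={'TEST_SET': '/var/lib/cvp-sanity/cvp_checks/tests/'},
    job_output_prefix='[ cvp-sanity/{build_number} ] ',
    verbose=True)
assert result == 'SUCCESS'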