Merge "Add additional sleep in publisher of test_k8s_cncf_certification"
diff --git a/README.md b/README.md
index 0f0ebd4..fdf0ebb 100644
--- a/README.md
+++ b/README.md
@@ -35,8 +35,8 @@
The LAB_CONFIG_NAME variable maps the cluster name from the model repository
to the set of templates in the ./tcp_tests/templates/ folder.
```
-export LAB_CONFIG_NAME=virtual-mcp-ocata-dvr # OVS-DVR with ocata packages
-export LAB_CONFIG_NAME=virtual-mcp-ocata-ovs # OVS-NO-DVR with ocata packages
+export LAB_CONFIG_NAME=cookied-mcp-ocata-dvr # OVS-DVR with ocata packages
+export LAB_CONFIG_NAME=cookied-mcp-ocata-ovs # OVS-NO-DVR with ocata packages
export LAB_CONFIG_NAME=virtual-mcp-ocata-cicd # Operational Support System Tools
export LAB_CONFIG_NAME=virtual-mcp11-dvr # OVS-DVR with neutron packages
export LAB_CONFIG_NAME=virtual-mcp11-ovs # OVS-NO-DVR with neutron packages
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 7a6daaa..36ea29a 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -96,7 +96,7 @@
export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
export MANAGER=devops
export SHUTDOWN_ENV_ON_TEARDOWN=false
- export BOOTSTRAP_TIMEOUT=1200
+ export BOOTSTRAP_TIMEOUT=1800
export PYTHONIOENCODING=UTF-8
export REPOSITORY_SUITE=${MCP_VERSION}
export TEST_GROUP=test_bootstrap_salt
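
The timeout bump above gives the salt cluster bootstrap 1800 s instead of 1200 s. As a minimal sketch of how a deployment step might honor that variable (hypothetical names, not the tcp-qa implementation):

import os
import time

def wait_for_salt_master(is_ready, interval=30):
    """Poll is_ready() until it returns True or BOOTSTRAP_TIMEOUT expires."""
    timeout = int(os.environ.get('BOOTSTRAP_TIMEOUT', '1800'))
    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_ready():
            return True
        time.sleep(interval)
    raise RuntimeError("Salt master not ready within {}s".format(timeout))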
diff --git a/jobs/pipelines/swarm-create-cfg-config-drive.groovy b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
index c43fb56..72d278f 100644
--- a/jobs/pipelines/swarm-create-cfg-config-drive.groovy
+++ b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
@@ -1,5 +1,6 @@
import java.text.SimpleDateFormat
+def gerrit = new com.mirantis.mk.Gerrit()
def dateFormat = new SimpleDateFormat("yyyyMMddHHmm")
def date = new Date()
def common_scripts_commit = "${COMMON_SCRIPTS_COMMIT}"
@@ -31,28 +32,109 @@
step([$class: 'WsCleanup'])
}
- stage("Get scripts") {
- // apt package genisoimage is required for this stage
- // download create-config-drive
+ stage("Get mk-pipelines, pipeline-library and mcp-common-scripts repositories") {
+ def cloned = true
+ withCredentials([[$class: 'SSHUserPrivateKeyBinding',
+ keyFileVariable: "GERRIT_KEY",
+ credentialsId: env.GERRIT_MCP_CREDENTIALS_ID,
+ usernameVariable: "GERRIT_USERNAME",
+ passwordVariable: "GERRIT_PASSWORD"]]) {
- def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${common_scripts_commit}/config-drive/create_config_drive.sh"
- sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+ dir("mcp-common-scripts-git") {
+ cloned = gerrit.gerritPatchsetCheckout([
+ credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+ gerritBranch: "${MCP_VERSION}",
+ gerritRefSpec: "${MCP_COMMON_SCRIPTS_REFS}",
+ gerritScheme: "ssh",
+ gerritName: "${GERRIT_USERNAME}",
+ gerritHost: "gerrit.mcp.mirantis.net",
+ gerritPort: "29418",
+ gerritProject: "mcp/mcp-common-scripts"
+ ])
+ }
+ if (!cloned) {
+ error("Failed to clone the repository mcp/mcp-common-scripts")
+ }
- def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${common_scripts_commit}/config-drive/master_config.yaml"
- sh "wget -O user_data ${user_data_script_url}"
+ sh ("""\
+ set -ex
+ eval \$(ssh-agent)
+ ssh-add ${GERRIT_KEY}
+ git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mk/mk-pipelines mk-pipelines
+ git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mcp-ci/pipeline-library pipeline-library
+ """)
+
+ if (PIPELINE_LIBRARY_REF != '') {
+ sh ("""\
+ set -ex
+ eval \$(ssh-agent)
+ ssh-add ${GERRIT_KEY}
+ cd pipeline-library
+ git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF}
+ git tag ${MCP_VERSION} FETCH_HEAD -f
+ """)
+ }
+ if (MK_PIPELINES_REF != '') {
+ sh ("""\
+ set -ex
+ eval \$(ssh-agent)
+ ssh-add ${GERRIT_KEY}
+ cd mk-pipelines
+ git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF}
+ git tag ${MCP_VERSION} FETCH_HEAD -f
+ """)
+ }
+
+ // dir("mk-pipelines-git") {
+ // cloned = gerrit.gerritPatchsetCheckout([
+ // credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+ // gerritRefSpec: "${MK_PIPELINES_REF}",
+ // gerritScheme: "ssh",
+ // gerritName: "${GERRIT_USERNAME}",
+ // gerritHost: "gerrit.mcp.mirantis.net",
+ // gerritPort: "29418",
+ // gerritProject: "mk/mk-pipelines"
+ // ])
+ // }
+ // if (!cloned) {
+ // error("Failed to clone the repository mk/mk-pipelines")
+ // }
+
+ // dir("pipeline-library-git") {
+ // cloned = gerrit.gerritPatchsetCheckout([
+ // credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+ // gerritRefSpec: "${PIPELINE_LIBRARY_REF}",
+ // gerritScheme: "ssh",
+ // gerritName: "${GERRIT_USERNAME}",
+ // gerritHost: "gerrit.mcp.mirantis.net",
+ // gerritPort: "29418",
+ // gerritProject: "mcp-ci/pipeline-library"
+ // ])
+ // }
+ // if (!cloned) {
+ // error("Failed to clone the repository mcp-ci/pipeline-library")
+ // }
+ }
+ //if (PIPELINE_LIBRARY_REF != '') {
+ // sh "cd pipeline-library; git tag ${MCP_VERSION} FETCH_HEAD -f ; cd .."
+ //}
+ //if (MK_PIPELINES_REF != '') {
+ // sh "cd mk-pipelines; git tag ${MCP_VERSION} FETCH_HEAD -f; cd .."
+ //}
+
+ // gerrit.gerritPatchsetCheckout() doesn't support cloning a bare repository
+ // sh "git clone --mirror mk-pipelines-git mk-pipelines"
+ // sh "git clone --mirror pipeline-library-git pipeline-library"
}
- stage("Clone mk-pipelines and pipeline-library") {
- sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git -b ${MCP_VERSION} mk-pipelines"
- sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git -b ${MCP_VERSION} pipeline-library"
- if (PIPELINE_LIBRARY_REF != '') {
- sh "cd pipeline-library; git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF} ; git tag ${MCP_VERSION} FETCH_HEAD -f ; cd .."
- }
- if (MK_PIPELINES_REF != '') {
- sh "cd mk-pipelines; git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF} ; git tag ${MCP_VERSION} FETCH_HEAD -f; cd .."
- }
+ stage("Prepare arguments for generation config drive") {
+
+ config_drive_script_path = "mcp-common-scripts-git/config-drive/create_config_drive.sh"
+ user_data_script_path = "mcp-common-scripts-git/config-drive/master_config.yaml"
+ sh "chmod +x ${config_drive_script_path}"
+
//args = "--user-data user_data --vendor-data user_data2 --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
- args = "--user-data user_data2 --vendor-data user_data --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
+ args = "--user-data user_data2 --vendor-data ${user_data_script_path} --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
}
stage("Get cluster model") {
@@ -70,7 +152,7 @@
stage("Set data"){
for (i in entries(smc)) {
- sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data"
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" ${user_data_script_path}"
}
}
@@ -151,8 +233,8 @@
stage("Create config-drive"){
// create cfg config-drive
- //sh "sed -i 's,config_dir/vendor-data,config_dir/user-data1,g' ./create-config-drive"
- sh "./create-config-drive ${args}"
+ // apt package genisoimage is required for this stage
+ sh "./${config_drive_script_path} ${args}"
}
stage("Save artifacts") {
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index bc411f7..780229d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -16,6 +16,7 @@
* REPOSITORY_SUITE Not used (backward compatibility, for manual deployment steps only)
* MCP_IMAGE_PATH1604 Not used (backward compatibility, for manual deployment steps only)
* IMAGE_PATH_CFG01_DAY01 Not used (backward compatibility, for manual deployment steps only)
+ * TEMPEST_IMAGE_VERSION Tempest image version: pike by default, can be queens.
*/
@Library('tcp-qa')_
@@ -54,6 +55,7 @@
}
if (steps.contains('openstack')) {
sources += """
+ export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
# TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
}
def installed = steps.collect {"""\
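
TEMPEST_IMAGE_VERSION is exported only for the openstack step; SharedPipeline.groovy (further below) falls back to 'pike' when the variable is unset. The same defaulting as a one-line Python sketch:

import os

# 'pike' unless the job passes TEMPEST_IMAGE_VERSION (e.g. 'queens').
tempest_image_version = os.environ.get('TEMPEST_IMAGE_VERSION') or 'pike'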
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 42027f0..3849e16 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -10,6 +10,7 @@
where 3200 and 2400 might be timeouts (not used in the testing pipeline)
* PARENT_NODE_NAME Name of the jenkins slave to create the environment
* PARENT_WORKSPACE Path to the workspace of the parent job to use tcp-qa repo
* TEMPEST_TEST_SUITE_NAME Name of the Tempest test suite in TestRail
* TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
*/
@@ -48,21 +49,28 @@
def report_result = ''
def report_url = ''
+ // deployment_report_name = "deployment_${ENV_NAME}.xml"
def deployment_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"deployment_${ENV_NAME}.xml\"", returnStdout: true)
+ // tcpqa_report_name =~ "nosetests.xml"
def tcpqa_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"nosetests.xml\"", returnStdout: true)
+ // tempest_report_name =~ "report_*.xml"
def tempest_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"report_*.xml\"", returnStdout: true)
+ // k8s_conformance_report_name =~ conformance_result.xml
def k8s_conformance_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_result.xml\"", returnStdout: true)
- def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"stacklight_report.xml\"", returnStdout: true)
+ // k8s_conformance_virtlet_report_name =~ conformance_virtlet_result.xml
+ def k8s_conformance_virtlet_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_virtlet_result.xml\"", returnStdout: true)
+ // stacklight_report_name =~ "stacklight_report.xml" or "report.xml"
+ def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"*report.xml\"", returnStdout: true)
common.printMsg(deployment_report_name ? "Found deployment report: ${deployment_report_name}" : "Deployment report not found", deployment_report_name ? "blue" : "red")
common.printMsg(tcpqa_report_name ? "Found tcp-qa report: ${tcpqa_report_name}" : "tcp-qa report not found", tcpqa_report_name ? "blue" : "red")
common.printMsg(tempest_report_name ? "Found tempest report: ${tempest_report_name}" : "tempest report not found", tempest_report_name ? "blue" : "red")
common.printMsg(k8s_conformance_report_name ? "Found k8s conformance report: ${k8s_conformance_report_name}" : "k8s conformance report not found", k8s_conformance_report_name ? "blue" : "red")
+ common.printMsg(k8s_conformance_virtlet_report_name ? "Found k8s conformance virtlet report: ${k8s_conformance_virtlet_report_name}" : "k8s conformance virtlet report not found", k8s_conformance_virtlet_report_name ? "blue" : "red")
common.printMsg(stacklight_report_name ? "Found stacklight-pytest report: ${stacklight_report_name}" : "stacklight-pytest report not found", stacklight_report_name ? "blue" : "red")
if (deployment_report_name) {
stage("Deployment report") {
-// report_name = "deployment_${ENV_NAME}.xml"
testSuiteName = "[MCP] Integration automation"
methodname = '{methodname}'
testrail_name_template = '{title}'
@@ -84,7 +92,6 @@
if (tcpqa_report_name) {
stage("tcp-qa cases report") {
- // tcpqa_report_name =~ "nosetests.xml"
testSuiteName = "[MCP_X] integration cases"
methodname = "{methodname}"
testrail_name_template = "{title}"
@@ -106,8 +113,7 @@
if ('openstack' in stacks && tempest_report_name) {
stage("Tempest report") {
- // tempest_report_name =~ "report_*.xml"
- testSuiteName = "[MCP1.1_PIKE]Tempest"
+ testSuiteName = env.TEMPEST_TEST_SUITE_NAME
methodname = "{classname}.{methodname}"
testrail_name_template = "{title}"
report_result = shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
@@ -123,9 +129,6 @@
if ('k8s' in stacks && k8s_conformance_report_name) {
stage("K8s conformance report") {
- // k8s_conformance_report_name =~ conformance_result.xml
- // TODO(ddmitriev): it's better to get the k8s version right after deployment
- // and store in some artifact that can be re-used here.
def k8s_version=shared.run_cmd_stdout("""\
. ./env_k8s_version;
echo "\$KUBE_SERVER_VERSION"
@@ -150,9 +153,30 @@
}
}
+ if ('k8s' in stacks && k8s_conformance_virtlet_report_name) {
+ stage("K8s conformance virtlet report") {
+ testSuiteName = "[k8s] Virtlet"
+ methodname = "{methodname}"
+ testrail_name_template = "{title}"
+ reporter_extra_options = [
+ "--send-duplicates",
+ "--testrail-add-missing-cases",
+ "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+ "--testrail-case-section-name \'Conformance\'",
+ ]
+ report_result = shared.upload_results_to_testrail(k8s_conformance_virtlet_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
+ }
+ }
+
if ('stacklight' in stacks && stacklight_report_name) {
stage("stacklight-pytest report") {
- // stacklight_report_name =~ "stacklight_report.xml"
testSuiteName = "LMA2.0_Automated"
methodname = "{methodname}"
testrail_name_template = "{title}"
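
Each report stage scans the upload output for a '[TestRun URL]' line and appends the last whitespace-separated token as a link in the build description. The same extraction as a small Python sketch (extract_testrun_url is a hypothetical name; the sample string is illustrative):

def extract_testrun_url(report_result):
    """Return the URL from the first '[TestRun URL]' line, or None."""
    for line in report_result.splitlines():
        if '[TestRun URL]' in line:
            return line.strip().split()[-1]
    return None

assert extract_testrun_url(
    "uploaded 120 results\n[TestRun URL] https://testrail.example/run/123"
) == "https://testrail.example/run/123"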
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 1c05b12..92b43b2 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -76,10 +76,23 @@
parameters: parameters,
propagate: false
+ def build_number = job_info.getNumber()
+ def build_url = job_info.getAbsoluteUrl()
+ def build_status = job_info.getResult()
+ try {
+ // Try to grab 'tar.gz' artifacts from the shell job
+ step($class: 'hudson.plugins.copyartifact.CopyArtifact',
+ projectName: job_name,
+ selector: specific("${build_number}"),
+ filter: "**/*.tar.gz",
+ target: '.',
+ flatten: true,
+ fingerprintArtifacts: true)
+ } catch (none) {
+ common.printMsg("No *.tar.gz files found in artifacts of the build ${build_url}", "purple")
+ }
+
if (job_info.getResult() != "SUCCESS") {
- def build_status = job_info.getResult()
- def build_number = job_info.getNumber()
- def build_url = job_info.getAbsoluteUrl()
def job_url = "${build_url}"
currentBuild.result = build_status
if (junit_report_filename) {
@@ -204,6 +217,7 @@
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def tempest_image_version = env.TEMPEST_IMAGE_VERSION ?: 'pike'
def parameters = [
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'PASSED_STEPS', value: passed_steps),
@@ -216,6 +230,8 @@
string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+ string(name: 'TEMPEST_IMAGE_VERSION', value: "${tempest_image_version}"),
+
]
common.printMsg("Start building job 'swarm-run-pytest' with parameters:", "purple")
common.prettyPrint(parameters)
@@ -227,6 +243,7 @@
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def tempest_test_suite_name = env.TEMPEST_TEST_SUITE_NAME
def parameters = [
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
@@ -234,6 +251,7 @@
string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+ string(name: 'TEMPEST_TEST_SUITE_NAME', value: "${tempest_test_suite_name}"),
]
common.printMsg("Start building job 'swarm-testrail-report' with parameters:", "purple")
common.prettyPrint(parameters)
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index 4a18d03..409034e 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -77,16 +77,16 @@
interfaces_pillar = k8s_actions._salt.get_pillar(
tgt=tgt, pillar='linux:network:interface')[0]
- for node_name, interfaces in interfaces_pillar.items():
+ for minion_id, interfaces in interfaces_pillar.items():
for iface_name, iface in interfaces.items():
iface_name = iface.get('name', iface_name)
default_proto = 'static' if 'address' in iface else 'dhcp'
if iface.get('proto', default_proto) != 'dhcp':
LOG.warning('Trying to kill dhclient for iface {0} '
- 'on node {1}'.format(iface_name, node_name))
+ 'on node {1}'.format(iface_name, minion_id))
underlay.check_call(
cmd='pkill -f "dhclient.*{}"'.format(iface_name),
- node_name=node_name, raise_on_err=False)
+ node_name=minion_id, raise_on_err=False)
LOG.warning('Restarting keepalived service on controllers...')
k8s_actions._salt.local(tgt='ctl*', fun='cmd.run',
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 7f4ce60..226ab22 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -71,13 +71,7 @@
LOG.info("############ Executing command ####### {0}".format(commands))
salt_actions.install(commands)
- salt_nodes = salt_actions.get_ssh_data()
- config.underlay.ssh = config.underlay.ssh + \
- [node for node in salt_nodes
- if not any(node['node_name'] == n['node_name']
- for n in config.underlay.ssh)]
- underlay.config_ssh = []
- underlay.add_config_ssh(config.underlay.ssh)
+ salt_actions.update_ssh_data_from_minions()
hardware.create_snapshot(ext.SNAPSHOT.salt_deployed)
salt_actions.sync_time()
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index f776221..17ad452 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -129,6 +129,7 @@
for d_node in self.__env.get_nodes(role__in=roles):
ssh_data = {
'node_name': d_node.name,
+ 'minion_id': d_node.name,
'roles': [d_node.role],
'address_pool': self._get_network_pool(
ext.NETWORK_TYPE.admin).address_pool.name,
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index dda95d2..6dcf615 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -115,6 +115,9 @@
failed += 1
if 'Minion did not return. [Not connected]' in s:
failed += 1
+ if ('Salt request timed out. The master is not responding.'
+ in s):
+ failed += 1
if s.startswith("[CRITICAL]"):
failed += 1
if 'Fatal' in s:
@@ -133,11 +136,14 @@
if x == 1 and skip_fail is False:
# In the last retry iteration, raise an exception
raise Exception("Step '{0}' failed:\n"
- "=============== STDOUT: ===============\n"
+ "=============== Command: ==============\n"
"{1}\n"
- "=============== STDERR: ===============\n"
+ "=============== STDOUT: ===============\n"
"{2}\n"
+ "=============== STDERR: ===============\n"
+ "{3}\n"
.format(description,
+ cmd,
result.stdout_str,
result.stderr_str))
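
The hunk above adds the master-timeout message to the markers that flag a step as failed, and includes the failed command in the raised exception text. A sketch of that marker counting, with the markers copied from the checks above (count_failures is a hypothetical helper):

FAILURE_MARKERS = (
    'Minion did not return. [Not connected]',
    'Salt request timed out. The master is not responding.',
    'Fatal',
)

def count_failures(stdout_lines):
    """Count lines that match any known failure marker."""
    failed = 0
    for s in stdout_lines:
        if s.startswith('[CRITICAL]'):
            failed += 1
        failed += sum(marker in s for marker in FAILURE_MARKERS)
    return failed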
@@ -233,7 +239,8 @@
result = {}
with self.__underlay.local() as local:
- result = local.execute('cd {0} && find . -type f -name "{1}"'
+ result = local.execute('cd {0} && find . -maxdepth 1 -type f'
+ ' -name "{1}"'
.format(local_path, local_filename))
LOG.info("Found files to upload:\n{0}".format(result))
@@ -277,7 +284,7 @@
with self.__underlay.remote(node_name=node_name) as remote:
- result = remote.execute('find {0} -type f -name {1}'
+ result = remote.execute('find {0} -maxdepth 1 -type f -name {1}'
.format(remote_path, remote_filename))
LOG.info("Found files to download:\n{0}".format(result))
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index e3e69a5..79974d3 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -112,6 +112,41 @@
names.sort()
return names[0]
+ @property
+ def controller_minion_id(self):
+ """ Return node name of controller node that used for all actions """
+ minion_ids = [minion_id['minion_id'] for minion_id in
+ self.get_controllers()]
+ # we want to return same controller name every time
+ minion_ids.sort()
+ return minion_ids[0]
+
+ @property
+ def is_metallb_enabled(self):
+ ctl_tgt = self.controller_minion_id
+ LOG.debug("Controller target: {}".format(ctl_tgt))
+
+ result = self._salt.get_pillar(
+ tgt=ctl_tgt,
+ pillar='kubernetes:common:addons:metallb:enabled')
+ metallb = result[0].get(ctl_tgt, False)
+ LOG.info("{} kubernetes:common:addons:metallb:enabled: {}"
+ .format(ctl_tgt, bool(metallb)))
+ return metallb
+
+ @property
+ def is_ingress_nginx_enabled(self):
+ ctl_tgt = self.controller_minion_id
+ LOG.debug("Controller target: {}".format(ctl_tgt))
+
+ result = self._salt.get_pillar(
+ tgt=ctl_tgt,
+ pillar='kubernetes:common:addons:ingress-nginx:enabled')
+ ingress_nginx = result[0].get(ctl_tgt, False)
+ LOG.info("{} kubernetes:common:addons:ingress-nginx:enabled: {}"
+ .format(ctl_tgt, bool(ingress_nginx)))
+ return ingress_nginx
+
def controller_check_call(self, cmd, **kwargs):
""" Run command on controller and return result """
LOG.info("running cmd on k8s controller: {}".format(cmd))
@@ -195,28 +230,29 @@
raise_on_err=raise_on_err, verbose=True)
def run_virtlet_conformance(self, timeout=60 * 120,
- log_file='virtlet_conformance.log'):
+ log_file='virtlet_conformance.log',
+ report_name="report.xml"):
if self.__config.k8s.run_extended_virtlet_conformance:
ci_image = "cloud-images.ubuntu.com/xenial/current/" \
"xenial-server-cloudimg-amd64-disk1.img"
cmd = ("set -o pipefail; "
"docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -junitOutput report.xml "
+ "-include-cloud-init-tests -junitOutput {3} "
"-image {2} -sshuser ubuntu -memoryLimit 1024 "
"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
"-ginkgo.focus '\[Conformance\]' "
"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file, ci_image))
+ log_file, ci_image, report_name))
else:
cmd = ("set -o pipefail; "
"docker run --net=host {0} /virtlet-e2e-tests "
- "-junitOutput report.xml "
+ "-junitOutput {2} "
"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
"-ginkgo.focus '\[Conformance\]' "
"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file))
+ log_file, report_name))
LOG.info("Executing: {}".format(cmd))
with self.__underlay.remote(
node_name=self.controller_name) as remote:
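
The new is_metallb_enabled and is_ingress_nginx_enabled properties read the addon flags from the controller's pillar, which lets tests skip scenarios a model does not enable. A hedged pytest-style usage sketch (the test body and skip message are illustrative; k8s_actions follows the fixture naming used in this repo):

import pytest

def test_metallb_service(k8s_actions):
    if not k8s_actions.is_metallb_enabled:
        pytest.skip("kubernetes:common:addons:metallb:enabled is False "
                    "in the controller pillar")
    # ... exercise a LoadBalancer-type service here ...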
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 8e70e58..e7fc15c 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -57,6 +57,7 @@
self.control_host)[0]
self.compute_name = self.underlay.get_target_node_names(
self.compute_host)[0]
+ self.barbican = False
@property
def salt_api(self):
@@ -70,8 +71,9 @@
public_cidr = public_net["cidr"].encode("ascii")
public_allocation_start = public_net["start"].encode("ascii")
public_allocation_end = public_net["end"].encode("ascii")
+ tempest_test_target = self.target_name.encode("ascii") + "*"
- return {
+ pillar = {
'classes': ['service.runtest.tempest',
'service.runtest.tempest.public_net',
'service.runtest.tempest.services.manila.glance'],
@@ -86,7 +88,7 @@
public_allocation_start,
'openstack_public_neutron_subnet_allocation_end':
public_allocation_end,
- 'tempest_test_target': self.target_name.encode("ascii"),
+ 'tempest_test_target': tempest_test_target,
'glance_image_cirros_location':
'http://cz8133.bud.mirantis.net:8099'
'/cirros-0.3.5-x86_64-disk.img',
@@ -138,6 +140,11 @@
'run_snapshot_tests': True,
}}}}}
+ if self.barbican:
+ pillar['classes'].append('service.runtest.tempest.barbican')
+
+ return pillar
+
def fetch_arficats(self, username=None, file_format='xml'):
with self.underlay.remote(node_name=self.target_name,
username=None) as tgt:
@@ -191,6 +198,10 @@
f.write(container_inspect)
def prepare(self):
+ barbican_pillar = "nova:controller:barbican:enabled"
+ result = self.__salt_api.get_pillar(tgt=self.control_name,
+ pillar=barbican_pillar)
+ self.barbican = result[0].get(self.control_name, False)
self.store_runtest_model()
cirros_pillar = ("salt-call --out=newline_values_only "
"pillar.get "
@@ -279,6 +290,19 @@
" --property hw:mem_page_size=any'")},
)
+ if self.barbican:
+ commands.append({
+ 'description': "Configure barbican",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd +
+ " state.sls barbican.client && " +
+ salt_call_cmd +
+ " state.sls runtest.test_accounts && " +
+ salt_call_cmd +
+ " state.sls runtest.barbican_sign_image")},
+ )
+
self.__salt_api.execute_commands(commands=commands,
label="Prepare for Tempest")
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 6fad0e4..a468b02 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -188,13 +188,14 @@
if len(hosts) == 0:
raise LookupError("Hosts is empty or absent")
- def host(node_name, ip):
+ def host(minion_id, ip):
return {
'roles': ['salt_minion'],
'keys': [
k['private'] for k in self.__config.underlay.ssh_keys
],
- 'node_name': node_name,
+ 'node_name': minion_id,
+ 'minion_id': minion_id,
'host': ip,
'address_pool': pool_name,
'login': settings.SSH_NODE_CREDENTIALS['login'],
@@ -216,6 +217,25 @@
host_list={k: v['ipv4'] for k, v in hosts.items()}))
raise StopIteration(msg)
+ def update_ssh_data_from_minions(self):
+ """Combine existing underlay.ssh with VCP salt minions"""
+ salt_nodes = self.get_ssh_data()
+
+ for salt_node in salt_nodes:
+ nodes = [n for n in self.__config.underlay.ssh
+ if salt_node['host'] == n['host']
+ and salt_node['address_pool'] == n['address_pool']]
+ if nodes:
+ # Assume that there can be only one node with such IP address
+ # Just update minion_id for this node
+ nodes[0]['minion_id'] = salt_node['minion_id']
+ else:
+ # New node, add to config.underlay.ssh
+ self.__config.underlay.ssh.append(salt_node)
+
+ self.__underlay.config_ssh = []
+ self.__underlay.add_config_ssh(self.__config.underlay.ssh)
+
def service_status(self, tgt, service):
result = self.local(tgt=tgt, fun='service.status', args=service)
return result['return']
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 3bb0a1f..f3ccef8 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -41,7 +41,6 @@
def install(self, commands, label='Install SL services'):
self.execute_commands(commands, label=label)
self.__config.stack_light.stacklight_installed = True
- self.__config.stack_light.sl_vip_host = self.get_sl_vip()
def get_sl_vip(self):
tgt = 'I@prometheus:server:enabled:True'
@@ -76,6 +75,7 @@
@property
def api(self):
if self._p_client is None:
+ self.__config.stack_light.sl_vip_host = self.get_sl_vip()
self._p_client = prometheus_client.PrometheusClient(
host=self.__config.stack_light.sl_vip_host,
port=self.__config.stack_light.sl_prometheus_port,
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 0bfb463..66f686b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -39,6 +39,7 @@
[
{
node_name: node1,
+ minion_id: node1.local,
address_pool: 'public-pool01',
host: ,
port: ,
@@ -50,6 +51,7 @@
},
{
node_name: node1,
+ minion_id: node1.local,
address_pool: 'private-pool01',
host:
port:
@@ -61,6 +63,7 @@
},
{
node_name: node2,
+ minion_id: node2.local,
address_pool: 'public-pool01',
keys_source_host: node1
...
@@ -96,6 +99,7 @@
ssh_data = {
# Required keys:
'node_name': ssh['node_name'],
+ 'minion_id': ssh['minion_id'],
'host': ssh['host'],
'login': ssh['login'],
'password': ssh['password'],
@@ -122,6 +126,7 @@
ssh_data = {
# Required keys:
'node_name': ssh['node_name'],
+ 'minion_id': ssh['minion_id'],
'host': ssh['host'],
'login': ssh['login'],
'password': ssh['password'],
@@ -143,7 +148,7 @@
return keys
def __ssh_data(self, node_name=None, host=None, address_pool=None,
- node_role=None):
+ node_role=None, minion_id=None):
ssh_data = None
@@ -171,6 +176,16 @@
break
else:
ssh_data = ssh
+ elif minion_id is not None:
+ for ssh in self.config_ssh:
+ if minion_id == ssh['minion_id']:
+ if address_pool is not None:
+ if address_pool == ssh['address_pool']:
+ ssh_data = ssh
+ break
+ else:
+ ssh_data = ssh
+
if ssh_data is None:
LOG.debug("config_ssh - {}".format(self.config_ssh))
raise Exception('Auth data for node was not found using '
@@ -187,6 +202,15 @@
names.append(ssh['node_name'])
return names
+ def minion_ids(self):
+ """Get list of minion ids registered in config.underlay.ssh"""
+
+ ids = [] # List is used to keep the original order of ids
+ for ssh in self.config_ssh:
+ if ssh['minion_id'] not in ids:
+ ids.append(ssh['minion_id'])
+ return ids
+
def host_by_node_name(self, node_name, address_pool=None):
ssh_data = self.__ssh_data(node_name=node_name,
address_pool=address_pool)
@@ -197,6 +221,11 @@
address_pool=address_pool)
return ssh_data['host']
+ def host_by_minion_id(self, minion_id, address_pool=None):
+ ssh_data = self.__ssh_data(minion_id=minion_id,
+ address_pool=address_pool)
+ return ssh_data['host']
+
def remote(self, node_name=None, host=None, address_pool=None,
username=None):
"""Get SSHClient by a node name or hostname.
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index b3a3013..0a447d6 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -96,6 +96,7 @@
ct.Cfg('ssh', ct.JSONList(),
help="""SSH Settings for Underlay: [{
'node_name': node1,
+ 'minion_id': node1.local,
'roles': ['salt-master', 'salt-minion', ],
'host': hostname,
'login': login,
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
index 0aebf89..fc35f88 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
@@ -1,118 +1,19 @@
{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
index 129693d..64f01fa 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
@@ -1,118 +1,19 @@
{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
index 2675136..34c254d 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
@@ -3,124 +3,20 @@
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/core.yaml b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
index c815d86..21ab849 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
@@ -3,124 +3,20 @@
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
index 530a4e7..55d6d8d 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
@@ -1,117 +1,17 @@
{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 570000a..c8fc345 100644
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -111,6 +111,11 @@
kubernetes_compute_single_address_ranges: 10.167.8.103-10.167.8.104
kubernetes_compute_tenant_address_ranges: 10.167.8.103-10.167.8.104
kubernetes_network_opencontrail_enabled: 'True'
+ kubernetes_keepalived_vip_interface: br_ctl
+ kubernetes_metallb_enabled: 'False' # Not used with opencontrail
+ metallb_addresses: 172.17.41.160-172.17.41.180
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
local_repositories: 'False'
maas_deploy_address: 172.16.49.66
maas_deploy_range_end: 10.0.0.254
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
index bf6c2da..a3de973 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
@@ -6,5 +6,6 @@
{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
index b7fcb07..b1e37c6 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
@@ -3,124 +3,13 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
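The Galera steps removed above follow a master-first pattern: the 'I@galera:master' node is converged before the 'I@galera:slave' nodes, and the slaves run with '-b 1' so they join the cluster one at a time. A minimal sketch of MACRO_INSTALL_GALERA(), assuming it wraps those same steps (the real definition in shared-core.yaml may differ):

    {% macro MACRO_INSTALL_GALERA() %}
    - description: Install Galera on first server
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@galera:master' state.sls galera
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false

    - description: Install Galera on other servers
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@galera:slave' state.sls galera -b 1
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false
    {% endmacro %}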
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
index 9971a9f..4dc3470 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
@@ -3,124 +3,13 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
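All of these steps rely on Salt compound targeting: the '-C' flag enables compound matchers, 'I@<pillar:key>' selects minions whose pillar data contains that key, and matchers can be combined with hostname globs. An illustrative step (test.ping only, not part of the deployment):

    # 'I@rabbitmq:server'          -> minions with rabbitmq:server in pillar
    # 'I@rabbitmq:server and *01*' -> the same set, restricted to *01* hostnames
    - description: Illustrative pillar-based targeting check
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@rabbitmq:server and *01*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true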
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
index e6dc270..4d9af8c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
@@ -1,117 +1,19 @@
{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
new file mode 100644
index 0000000..80073cf
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
@@ -0,0 +1,11 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..5aa9ebe
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
@@ -0,0 +1,94 @@
+nodes:
+ cfg01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+ single_address: 10.167.8.99
+
+ # Physical nodes
+ kvm01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_sdb
+ - linux_system_codename_xenial
+ interfaces:
+      enp2s0f1:
+ role: single_dhcp
+ enp5s0f0:
+ role: bond0_ab_contrail
+ enp5s0f1:
+ role: single_vlan_ctl
+
+# cmp001.cookied-bm-oc40-queens.local:
+# reclass_storage_name: openstack_compute_node01
+# roles:
+# - openstack_compute
+# - features_lvm_backend_volume_sdb
+# - linux_system_codename_xenial
+# interfaces:
+# enp2s0f1:
+# role: single_mgm
+# deploy_address: 172.16.49.73
+# enp5s0f0:
+# role: single_contrail_vlan_prv
+# tenant_address: 192.168.0.101
+# enp5s0f1:
+# role: single_vlan_ctl
+# single_address: 10.167.8.101
+# cmp002.cookied-bm-oc40-queens.local:
+# reclass_storage_name: openstack_compute_node02
+# roles:
+# - openstack_compute
+# - features_lvm_backend_volume_sdb
+# - linux_system_codename_xenial
+# interfaces:
+# enp2s0f1:
+# role: single_mgm
+# deploy_address: 172.16.49.74
+# enp5s0f0:
+# role: single_contrail_vlan_prv
+# tenant_address: 192.168.0.102
+# enp5s0f1:
+# role: single_vlan_ctl
+# single_address: 10.167.8.102
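The cmp<<count>> key above is a dynamic inventory pattern rather than a literal hostname. Presumably the inventory generator expands it into one entry per compute node, driven by openstack_compute_count and compute_padding_with_zeros from the cookiecutter context below; the exact generated names depend on that tooling. A hedged sketch of the expansion:

    # With openstack_compute_count: '2' and compute_padding_with_zeros: 'True',
    # cmp<<count>> presumably expands into entries along these lines:
    cmp001.cookied-bm-oc40-queens.local:
      reclass_storage_name: openstack_compute_rack01
      roles:
      - openstack_compute
      - features_lvm_backend_volume_sdb
      - linux_system_codename_xenial
      interfaces:
        enp2s0f1:
          role: single_dhcp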
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
new file mode 100644
index 0000000..7dff4de
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
@@ -0,0 +1,287 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% set PATTERN = os_env('PATTERN', 'false') %}
+{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Workaround - install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
+
+# install contrail
+
+- description: Install Docker services
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
+ fi; sleep 10;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install opencontrail database services on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install opencontrail database services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Spawn Opencontrail docker images
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Finalize opencontrail services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Finalize opencontrail services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Finalize opencontrail services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: true
+
+- description: Check contrail status
+ cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Reboot computes
+ cmd: |
+ salt "cmp*" system.reboot;
+ sleep 600;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Remove crash files left in /var/crashes/ while vrouter was crashed
+ cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Apply Opencontrail compute
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Apply Opencontrail compute
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Check status for contrail services
+ cmd: |
+ sleep 15;
+ salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create heat-net before creating the external net
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create heat-net'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create public network for contrail
+ cmd: |
+ salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Steps from neutron client for contrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Steps from neutron client for contrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create heat-router'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Steps from neutron client for contrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Steps from neutron client for contrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Prepare the runtest (tempest) environment
+
+- description: Upload tempest template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: runtest.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Include class with tempest template into cfg node
+ cmd: |
+ sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ salt '*' saltutil.refresh_pillar;
+ salt '*' saltutil.sync_all;
+ salt 'ctl01*' pkg.install docker.io;
+ salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+ salt 'cfg01*' state.sls salt.minion && sleep 20;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Enforce keystone client
+ cmd: |
+ salt 'cfg01*' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create flavors for tests
+ cmd: |
+ salt 'cfg01*' state.sls nova.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image
+ cmd: |
+ salt 'cfg01*' state.sls glance.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate tempest config
+ cmd: |
+ salt 'cfg01*' state.sls runtest;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Download cirros image for runtest
+ cmd: |
+ wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Download xml results
+ download:
+ remote_path: /tmp/test/
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_CTL01 }}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+ _param:
+ glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+ openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+ openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+ openstack_public_neutron_subnet_gateway: 192.168.200.1
+ runtest_tempest_cfg_dir: /tmp/test
+ runtest_tempest_cfg_name: tempest.conf
+ runtest_tempest_public_net: public
+ tempest_test_target: ctl01*
+ neutron:
+ client:
+ enabled: true
+ runtest:
+ enabled: true
+ keystonerc_node: ctl01*
+ tempest:
+ DEFAULT:
+ log_file: tempest.log
+ cfg_dir: ${_param:runtest_tempest_cfg_dir}
+ cfg_name: ${_param:runtest_tempest_cfg_name}
+ compute:
+ min_compute_nodes: 2
+ convert_to_uuid:
+ network:
+ public_network_id: ${_param:runtest_tempest_public_net}
+ enabled: true
+ heat_plugin:
+ build_timeout: '600'
+ put_keystone_rc_enabled: false
+ put_local_image_file_enabled: false
+ share:
+ capability_snapshot_support: true
+ run_driver_assisted_migration_tests: false
+ run_manage_unmanage_snapshot_tests: false
+ run_manage_unmanage_tests: false
+ run_migration_with_preserve_snapshots_tests: false
+ run_quota_tests: true
+ run_replication_tests: false
+ run_snapshot_tests: true
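The ${_param:...} references above are reclass parameter interpolation: each resolves to the value defined under parameters:_param: in the rendered model. A sketch of the effective tempest settings after interpolation, assuming standard reclass behaviour:

    runtest:
      tempest:
        DEFAULT:
          log_file: tempest.log
        cfg_dir: /tmp/test       # from runtest_tempest_cfg_dir
        cfg_name: tempest.conf   # from runtest_tempest_cfg_name

So the generated config lands at /tmp/test/tempest.conf, the same path that the "Run tempest from new docker image" step in openstack.yaml mounts into the container as /etc/tempest/tempest.conf.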
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..bfcd153
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,257 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-bm-oc40-queens.local
+ cluster_name: cookied-bm-oc40-queens
+ opencontrail_version: 4.0
+ linux_repo_contrail_component: oc40
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-51
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.220
+ opencontrail_router01_hostname: rtr01
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 192.168.0.101-192.168.0.102
+ openstack_compute_backend_address_ranges: 192.168.0.101-192.168.0.102
+ openstack_compute_node01_hostname: cmp01
+ openstack_compute_node02_hostname: cmp02
+ openstack_compute_node01_address: 10.167.8.101
+ openstack_compute_node02_address: 10.167.8.102
+ openstack_compute_node01_single_address: 10.167.8.101
+ openstack_compute_node02_single_address: 10.167.8.102
+ openstack_compute_node01_deploy_address: 172.16.49.73
+ openstack_compute_node02_deploy_address: 172.16.49.74
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 172.16.49.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.220
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-bm-oc40-queens.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
new file mode 100644
index 0000000..90d7a3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
@@ -0,0 +1,271 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-oc40-queens.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
new file mode 100644
index 0000000..3853acd
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
@@ -0,0 +1,152 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-oc40-queens') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Change path to internal storage for salt.control images"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Temporary workaround for correct bridge names according to the environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Update minion information
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun openssh after env model is generated
+ cmd: |
+ salt-call state.sls openssh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Create VMs for control plane
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: '*Workaround* to wait until the control-plane VMs appear in salt-key (instead of a sleep)'
+ cmd: |
+ salt-key -l acc| sort > /tmp/current_keys.txt &&
+ salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 20, delay: 30}
+ skip_fail: false
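+# NOTE: the pipe above exits non-zero while any VM name reported by
+# 'virsh list --name' is still missing from the accepted keys, so the
+# retry loop (20 x 30s) polls until all control-plane minions register.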
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes to allow access to internal services
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+ cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Lab04 workaround: Give each node root acces with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
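+# NOTE: 'ssh-keygen -y -f /root/.ssh/id_rsa' prints the public key matching
+# the cfg01 private key; appending it to authorized_keys on every minion
+# lets cfg01 log in as root over SSH.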
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
new file mode 100644
index 0000000..2ff8f3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
@@ -0,0 +1,15 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6c9e48f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block SSH access while the node is being prepared
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - apt-get update
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
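+ # NOTE: host-key checking is disabled deliberately; lab nodes are
+ # redeployed often, so their host keys change on every rebuild.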
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..106c3d5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,99 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block SSH access while the node is being prepared
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
new file mode 100644
index 0000000..915981e
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
@@ -0,0 +1,95 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block SSH access while the node is being prepared
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
new file mode 100644
index 0000000..e84e22d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
@@ -0,0 +1,574 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-oc40-queens') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.8.99') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+#{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
+#{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-oc40-queens/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
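+# NOTE: the '&name' anchors above are referenced below as '*name' aliases,
+# letting every node reuse the same cloud-init payloads without duplication.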
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-bm-oc4_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +62
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ #default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ #default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+ #default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ #virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ #virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+ # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ #ip_ranges:
+ # dhcp: [+2, -4]
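+ # NOTE: '+N'/'-N' entries are offsets relative to the start/end of the
+ # pool network; e.g. '+62' in 172.16.49.64/26 resolves to 172.16.49.126
+ # (an assumption based on fuel-devops address-pool conventions).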
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+ gateway: +1
+ l2_network_device: +1
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '192.168.5.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '192.168.200.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+
+ groups:
+
+ - name: virtual
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ parent_iface:
+ phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+ private:
+ parent_iface:
+ phys_dev: !os_env CONTROL_IFACE
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used in the 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
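+ # NOTE: node 'system' volumes below reference these images through
+ # 'backing_store', so each node gets a copy-on-write qcow2 overlay
+ # instead of a full copy of the image.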
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: default
+ driver:
+ name: devops_driver_ironic
+ params:
+ os_auth_token: fake-token
+ ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
+ # to access the Ironic API
+ # Agent URLs must be accessible from the deploying node when nodes
+ # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+ agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+ agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+ network_pools:
+ admin: admin-pool01
+
+ nodes:
+
+ # - name: {{ HOSTNAME_CFG01 }}
+ # role: salt_master
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # # As with the agent URLs, this is a URL to the image that should be
+ # # used to deploy the node. It must also be accessible from the deploying
+ # # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ # - name: iso # The volume named 'iso' will be used
+ # # to store the image with cloud-init metadata.
+
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ # interfaces:
+ # - label: enp3s0f0 # Infra interface
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+ # - label: enp3s0f1
+ # l2_network_device: admin
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+ # network_config:
+ # enp3s0f0:
+ # networks:
+ # - infra
+ # enp3s0f1:
+ # networks:
+ # - admin
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URLs, this is a URL to the image that should be
+ # used to deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URLs, this is a URL to the image that should be
+ # used to deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URLs, this is a URL to the image that should be
+ # used to deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+ # - label: eno2
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+ network_config:
+ # eno1:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ # - name: {{ HOSTNAME_KVM04 }}
+ # role: salt_minion
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_KVM04 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+ #
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+ #
+ # # As with the agent URLs, this is a URL to the image that should be
+ # # used to deploy the node. It must also be accessible from the deploying
+ # # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ #
+ # - name: iso # The volume named 'iso' will be used
+ # # to store the image with cloud-init metadata.
+ #
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data
+ #
+ # interfaces:
+ # # - label: eno1
+ # - label: enp2s0f0
+ # l2_network_device: admin
+ # mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
+ # # - label: eno2
+ # - label: enp2s0f1
+ # mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
+ #
+ # network_config:
+ # # eno1:
+ # enp2s0f0:
+ # networks:
+ # - admin
+ # bond0:
+ # networks:
+ # - control
+ # aggregation: active-backup
+ # parents:
+ # - enp2s0f1
+ #
+ - name: {{ HOSTNAME_CMP001 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URLs, this is a URL to the image that should be
+ # used to deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
+
+ interfaces:
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: enp5s0f2
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+ # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
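+ # NOTE (assumption based on the template convention): the 'dpdk' feature
+ # marks a NIC for userspace (DPDK) networking, and 'dpdk_pci' records the
+ # PCI address used when binding the interface to the DPDK driver.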
+
+ network_config:
+ enp2s0f1:
+ networks:
+ - admin
+
+ - name: {{ HOSTNAME_CMP002 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URLs, this is a URL to the image that should be
+ # used to deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
+
+ interfaces:
+ # - label: eno1
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+ # - label: eth0
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+ # - label: eth3
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ # - label: eth2
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: eth4
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+ # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+ network_config:
+ enp2s0f1:
+ networks:
+ - admin
+
+ # - name: {{ HOSTNAME_CMP003 }}
+ # role: salt_minion
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CMP003 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+ #
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+ #
+ # # As with the agent URLs, this is a URL to the image that should be
+ # # used to deploy the node. It must also be accessible from the deploying
+ # # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ #
+ # - name: iso # The volume named 'iso' will be used
+ # # to store the image with cloud-init metadata.
+ #
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_hwe
+ #
+ # interfaces:
+ # # - label: eno1
+ # - label: enp2s0f1
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
+ # # - label: eth0
+ # - label: enp2s0f0
+ # l2_network_device: admin
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
+ #
+ # network_config:
+ # enp2s0f0:
+ # networks:
+ # - admin
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index 56f8465..438696b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -128,7 +128,17 @@
kubernetes_externaldns_enabled: 'False'
kubernetes_keepalived_vip_interface: br_ctl
kubernetes_network_calico_enabled: 'True'
- kubernetes_virtlet_enabled: 'False'
+ kubernetes_virtlet_enabled: 'True'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: 10.167.4.220
+ kubernetes_proxy_node01_address: 10.167.4.221
+ kubernetes_proxy_node02_address: 10.167.4.222
+ kubernetes_metallb_enabled: 'True'
+ metallb_addresses: 172.17.16.150-172.17.16.190
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
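+ # NOTE (assumption): 'metallb_addresses' is the pool MetalLB allocates
+ # LoadBalancer IPs from, and the two ingress-nginx replicas provide HA
+ # behind the kubernetes_proxy nodes defined above.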
local_repositories: 'False'
maas_deploy_address: 10.167.5.15
maas_deploy_range_end: 10.167.5.199
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index 6097cba..4abe271 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -9,6 +9,8 @@
role: single_dhcp
ens4:
role: single_static_ctl
+ ens5:
+ role: single_storage_dhcp
kvm01:
reclass_storage_name: infra_kvm_node01
@@ -86,6 +88,8 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl02:
reclass_storage_name: kubernetes_control_node02
@@ -97,6 +101,8 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl03:
reclass_storage_name: kubernetes_control_node03
@@ -108,6 +114,34 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
+
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
+
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
# Generator-based computes. For compatibility only
cmp<<count>>:
@@ -121,6 +155,8 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
mon01:
reclass_storage_name: stacklight_server_node01
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index c0ceb3d..132a382 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -25,6 +25,7 @@
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02') %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03') %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
@@ -57,6 +58,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+90, -10]
@@ -94,7 +97,8 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_PRX01 }}: +222
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
default_{{ HOSTNAME_KVM }}: +240
default_{{ HOSTNAME_KVM01 }}: +241
default_{{ HOSTNAME_KVM02 }}: +242
@@ -131,6 +135,15 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
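+ # NOTE: static reservations keep these storage addresses predictable
+ # even though the external pool below now also serves DHCP
+ # (see 'dhcp: true').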
ip_ranges:
dhcp: [+10, -10]
@@ -170,7 +183,7 @@
external:
address_pool: external-pool01
- dhcp: false
+ dhcp: true
forward:
mode: nat
@@ -206,20 +219,26 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- interfaces:
+ interfaces: &all_interfaces
- label: ens3
l2_network_device: admin
interface_model: *interface_model
- label: ens4
l2_network_device: private
interface_model: *interface_model
- network_config:
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
ens3:
networks:
- admin
ens4:
networks:
- private
+ ens5:
+ networks:
+ - external
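+ # NOTE (assumption): the anchors were renamed to 'all_interfaces' /
+ # 'all_network_config' so that only nodes needing the third (external)
+ # NIC reference them, while other nodes keep the two-NIC '*interfaces'
+ # anchor defined elsewhere in this file.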
- name: {{ HOSTNAME_KVM01 }}
role: salt_minion
@@ -412,8 +431,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
@@ -438,8 +457,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL03 }}
role: salt_minion
@@ -464,8 +483,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
@@ -490,8 +509,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP02 }}
role: salt_minion
@@ -516,8 +535,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP03 }}
role: salt_minion
@@ -542,8 +561,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP04 }}
role: salt_minion
@@ -568,8 +587,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
@@ -804,3 +823,55 @@
interfaces: *interfaces
network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_PRX02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index 8fcb519..b8bda7e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -129,6 +129,12 @@
kubernetes_keepalived_vip_interface: br_ctl
kubernetes_network_calico_enabled: 'True'
kubernetes_virtlet_enabled: 'False'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: 10.167.4.220
+ kubernetes_proxy_node01_address: 10.167.4.221
+ kubernetes_proxy_node02_address: 10.167.4.222
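+ # NOTE: these proxy addresses line up with the '+221'/'+222' reservations
+ # added to underlay.yaml (assuming the 10.167.4.0/24 private pool).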
local_repositories: 'False'
maas_deploy_address: 10.167.5.15
maas_deploy_range_end: 10.167.5.199
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
index d9e20c6..d13627b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -109,6 +109,28 @@
ens4:
role: single_ctl_calico
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
# Generator-based computes. For compatibility only
cmp<<count>>:
reclass_storage_name: kubernetes_compute_rack01
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index fe1970f..81a8afa 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -25,6 +25,7 @@
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
@@ -57,6 +58,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+90, -10]
@@ -94,7 +97,8 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_PRX01 }}: +222
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
default_{{ HOSTNAME_KVM }}: +240
default_{{ HOSTNAME_KVM01 }}: +241
default_{{ HOSTNAME_KVM02 }}: +242
@@ -131,6 +135,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+10, -10]
@@ -570,3 +576,55 @@
interfaces: *interfaces
network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index 118322a..7352614 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -128,6 +128,16 @@
kubernetes_externaldns_enabled: 'False'
kubernetes_keepalived_vip_interface: br_ctl
kubernetes_network_calico_enabled: 'True'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: 10.167.4.220
+ kubernetes_proxy_node01_address: 10.167.4.221
+ kubernetes_proxy_node02_address: 10.167.4.222
+ kubernetes_metallb_enabled: 'True'
+ metallb_addresses: 172.17.16.150-172.17.16.190
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
local_repositories: 'False'
maas_deploy_address: 10.167.5.15
maas_deploy_range_end: 10.167.5.199
@@ -174,5 +184,3 @@
kubernetes_network_genie_enabled: 'True'
kubernetes_genie_default_plugin: 'calico'
kubernetes_virtlet_enabled: 'True'
- kubernetes_compute_node01_hostname: cmp001
- kubernetes_compute_node02_hostname: cmp002
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
index d9e20c6..807d07f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
@@ -9,6 +9,8 @@
role: single_dhcp
ens4:
role: single_static_ctl
+ ens5:
+ role: single_storage_dhcp
kvm01:
reclass_storage_name: infra_kvm_node01
@@ -86,6 +88,8 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl02:
reclass_storage_name: kubernetes_control_node02
@@ -97,6 +101,8 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl03:
reclass_storage_name: kubernetes_control_node03
@@ -108,6 +114,34 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
+
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
+
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
# Generator-based computes. For compatibility only
cmp<<count>>:
@@ -121,3 +155,5 @@
role: single_dhcp
ens4:
role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
index bda254b..ee69506 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
@@ -15,6 +15,8 @@
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
@@ -47,6 +49,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+90, -10]
@@ -72,6 +76,8 @@
default_{{ HOSTNAME_CMP02 }}: +102
default_{{ HOSTNAME_CMP03 }}: +103
default_{{ HOSTNAME_CMP04 }}: +104
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
default_{{ HOSTNAME_KVM }}: +240
default_{{ HOSTNAME_KVM01 }}: +241
default_{{ HOSTNAME_KVM02 }}: +242
@@ -108,6 +114,15 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+10, -10]
@@ -147,7 +162,7 @@
external:
address_pool: external-pool01
- dhcp: false
+ dhcp: true
forward:
mode: nat
@@ -183,20 +198,26 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- interfaces:
+ interfaces: &all_interfaces
- label: ens3
l2_network_device: admin
interface_model: *interface_model
- label: ens4
l2_network_device: private
interface_model: *interface_model
- network_config:
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
ens3:
networks:
- admin
ens4:
networks:
- private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_KVM01 }}
role: salt_minion
@@ -389,8 +410,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
@@ -415,8 +436,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL03 }}
role: salt_minion
@@ -441,8 +462,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
@@ -467,8 +488,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP02 }}
role: salt_minion
@@ -493,8 +514,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP03 }}
role: salt_minion
@@ -519,8 +540,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP04 }}
role: salt_minion
@@ -545,5 +566,57 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_PRX02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
index a38f2f3..12e013c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
@@ -9,6 +9,17 @@
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
+ cmd: |
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
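+# NOTE: 'refs/changes/32/29432/11' is the Gerrit refspec for patchset 11 of
+# change 29432; copying the checked-out formula into /srv/salt/env/prd/
+# overrides the packaged linux formula for this deployment.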
+
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index 1561577..94b6549 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -261,3 +261,8 @@
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
openstack_octavia_enabled: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
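+# The four parameters above define the Octavia LB management network:
+# the health manager binds to 192.168.1.12 and amphorae are allocated
+# addresses from the .2-.200 pool within 192.168.1.0/24.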
+
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
index 4905e32..33440ad 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
@@ -9,6 +9,17 @@
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
+ cmd: |
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
new file mode 100644
index 0000000..855363b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
@@ -0,0 +1,269 @@
+default_context:
+ auditd_enabled: 'False'
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: cookied-cicd-queens-dvr-sl.local
+ cluster_name: cookied-cicd-queens-dvr-sl
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 10.167.5.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 10.167.5.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 10.167.5.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_deploy_range_end: 10.167.5.199
+ maas_deploy_range_start: 10.167.5.180
+ maas_deploy_vlan: '0'
+ maas_fabric_name: deploy-fabric0
+ maas_hostname: cfg01
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
+ openstack_control_address: 10.167.4.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.100
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 10.167.4.101
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 10.167.4.102
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 10.167.4.103
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.4.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.6.6
+ openstack_gateway_node02_address: 10.167.4.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.7
+ openstack_gateway_node03_address: 10.167.4.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.6.8
+ openstack_message_queue_address: 10.167.4.100
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 10.167.4.101
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 10.167.4.102
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 10.167.4.103
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.4.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.4.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+ openstack_octavia_enabled: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
+
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
new file mode 100644
index 0000000..c3efdde
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
@@ -0,0 +1,262 @@
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ classes:
+ - environment.cookied-cicd-queens-dvr-sl.override_ntp_virtual
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_vdb
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
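+ # ens5/ens6 join the OVS bonds for the VXLAN tenant mesh and the
+ # floating network; gtw01 below uses the same interface roles.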
+
+ gtw01:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
new file mode 100644
index 0000000..62a8a23
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
@@ -0,0 +1,25 @@
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
+ cmd: |
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..4c43578
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,101 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo touch /is_cloud_init_started
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - echo "******** MOUNT CONFIG DRIVE"
+ # Mount config drive
+ - mkdir /root/config-drive
+ - mount /dev/sr0 /root/config-drive
+
+ # Configure the DNS resolver
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ #- sudo ifdown ens3
+ #- sudo ip r d default || true # remove existing default route to get it from dhcp
+ #- sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Run user data script from config drive
+ - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+ - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+ - rm -f /etc/network/interfaces
+ #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+ #- cp /root/config-drive/user-data /root/user-data
+ #- sed -i '/^reboot$/d' /root/user-data
+ #- set -x; cd /root && /bin/bash -xe ./user-data
+ - |
+ set -x
+ cd /root/config-drive
+ if /bin/bash -xe ./user-data; then
+ touch /is_cloud_init_finished
+ else
+ set +x
+ echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
+ fi
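+ # Marker files: /is_cloud_init_finished on success, /is_cloud_init_failed
+ # on error (presumably polled by the test harness to track bootstrap status).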
+
+ # Enable root access (after reboot)
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ #- path: /etc/network/interfaces
+ - path: /root/interfaces
+ content: |
+ auto lo
+ iface lo inet loopback
+
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 60
+ ServerAliveCountMax 0
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure the DNS resolver
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
new file mode 100644
index 0000000..baa714d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
@@ -0,0 +1,867 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-queens-dvr-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}  {# VIP hostname referenced in the address pools below; assumed default #}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}  {# VIP hostname referenced in the address pools below; assumed default #}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-queens-dvr-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_KVM }}: +240
+ default_{{ HOSTNAME_KVM01 }}: +241
+ default_{{ HOSTNAME_KVM02 }}: +242
+ default_{{ HOSTNAME_KVM03 }}: +243
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
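+ # Values like '+15' and '-10' are devops address-pool offsets counted from
+ # the start and end of the subnet; the same convention applies to every
+ # pool in this file.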
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_KVM }}: +240
+ default_{{ HOSTNAME_KVM01 }}: +241
+ default_{{ HOSTNAME_KVM02 }}: +242
+ default_{{ HOSTNAME_KVM03 }}: +243
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+180, +220]
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: route
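+ # Forward modes: 'nat' gives the admin network outbound access through the
+ # host, while 'route' only routes traffic between the host and the network;
+ # the tenant network defines no forward mode and stays isolated.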
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
+ source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+ # it will be uploaded after config drive generation
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
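+ # The '&interfaces' and '&network_config' anchors defined above are reused
+ # by the other two-NIC nodes below via '*interfaces' / '*network_config'.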
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
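+ # '&all_interfaces' / '&all_network_config' extend the two-NIC layout with
+ # the tenant and external networks; cmp002 and gtw01 below reuse them.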
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
index e1d2cb4..8954160 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
@@ -2,98 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
index b3c533d..b335251 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
@@ -9,6 +9,7 @@
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -172,11 +173,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
@@ -189,9 +186,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-
-- description: Set floating ip address on br-floating
- cmd: ifconfig br-floating {{ IPV4_NET_EXTERNAL_PREFIX }}.110/24
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
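
`INSTALL_DOCKER_ON_GTW()` comes from `shared-salt.yaml`, which is also outside this diff. Judging from the steps it replaces here and in the later hunks (which fold the iptables FORWARD step into the same call), a plausible sketch — an assumption, not the verbatim definition — is:

```
{%- macro INSTALL_DOCKER_ON_GTW() %}
- description: Install docker.io on gtw
  cmd: salt-call cmd.run 'apt-get install docker.io -y'
  node_name: {{ HOSTNAME_GTW01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false

- description: Enable forward policy
  cmd: iptables --policy FORWARD ACCEPT
  node_name: {{ HOSTNAME_GTW01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
{%- endmacro %}
```
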
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
index 9572556..81afdb5 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
index 0c03d81..6a1278e 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
index eade071..6672997 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
@@ -7,6 +7,7 @@
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -194,17 +195,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
index 6fefafb..4fee5c5 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
index ac74382..edb5059 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
index e298fcf..7260beb 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
@@ -9,6 +9,7 @@
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -172,17 +173,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
index 62072c6..7d6147d 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
index a7cd35f..4b79fcb 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
\ No newline at end of file
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
index 52c0fdb..04871da 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
@@ -7,6 +7,7 @@
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -194,17 +195,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
index b21f928..883c30f 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index b860da9..a74e3d7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -2,8 +2,9 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ designate_backend: bind
+ cluster_domain: cookied-mcp-ocata-dvr.local
+ cluster_name: cookied-mcp-ocata-dvr
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
similarity index 90%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
index 6afe16e..f7518bc 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
@@ -17,9 +17,9 @@
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
+ - features_designate_bind9_database
+ - features_designate_bind9
+ - features_designate_bind9_keystone
- features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
@@ -35,8 +35,8 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+ - features_designate_bind9_database
+ - features_designate_bind9
- features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
@@ -52,8 +52,8 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+ - features_designate_bind9_database
+ - features_designate_bind9
- features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
@@ -66,7 +66,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_pool_manager_proxy
+ - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -148,7 +148,7 @@
dns01.mcp11-ovs-dpdk.local:
reclass_storage_name: openstack_dns_node01
roles:
- - features_designate_pool_manager_dns
+ - features_designate_bind9_dns
- linux_system_codename_xenial
classes:
- system.linux.system.repo.mcp.extra
@@ -165,7 +165,7 @@
dns02.mcp11-ovs-dpdk.local:
reclass_storage_name: openstack_dns_node02
roles:
- - features_designate_pool_manager_dns
+ - features_designate_bind9_dns
- linux_system_codename_xenial
classes:
- system.linux.system.repo.mcp.extra
@@ -177,4 +177,4 @@
role: single_dhcp
ens4:
role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
+ single_address: ${_param:openstack_dns_node02_address}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
new file mode 100644
index 0000000..fc5d4f8
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
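
Of these macros, `MACRO_CHECK_VIP()` is the only one that wraps shell logic rather than a single `state.sls` call. Reconstructed from the step it replaces elsewhere in this diff (again an assumption about `shared-core.yaml`, not its verbatim content):

```
{%- macro MACRO_CHECK_VIP() %}
- description: Check the VIP
  cmd: |
    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false
{%- endmacro %}
```
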
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
similarity index 90%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
index e3a98f1..dc9de1c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
@@ -1,14 +1,15 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -53,9 +54,9 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
# install designate backend
-- description: Install powerdns
+- description: Install bind
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
+ -C 'I@bind:server' state.sls bind
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -172,17 +173,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
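
With the designate backend switched from powerdns to bind9 above, a quick way to verify the new backend on the dns nodes would be a follow-up step like this (hypothetical — this change adds no such check):

```
- description: Check bind9 service on dns nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@bind:server' service.status bind9
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```
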
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
similarity index 81%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
index 54f4420..9d3deb7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
@@ -1,14 +1,14 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-dvr/overrides.yml') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml') %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
index ff0e77a..405e647 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
similarity index 97%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
index 6f69a74..4893e2c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,14 +12,14 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -30,7 +30,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index 5dffbd3..2a6d8f9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -2,8 +2,8 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-ovs.local
- cluster_name: virtual-mcp-ocata-ovs
+ cluster_domain: cookied-mcp-ocata-ovs.local
+ cluster_name: cookied-mcp-ocata-ovs
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
new file mode 100644
index 0000000..6fc2af4
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
index 50bad03..4072632 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
@@ -1,12 +1,13 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -148,17 +149,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
similarity index 81%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
copy to tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
index 54f4420..41827c7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
@@ -1,14 +1,14 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-dvr/overrides.yml') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-ovs/overrides.yml') %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
index 9ec64be..7cc598b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
similarity index 97%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
index c64bdc8..2d31a5a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,14 +12,14 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-ovs') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -28,7 +28,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
index 8057165..5716d76 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
@@ -1,118 +1,20 @@
{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 7df1f81..22e4442 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -92,7 +92,7 @@
gainsight_service_enabled: 'False'
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
- gnocchi_aggregation_storage: file
+ gnocchi_aggregation_storage: ceph
infra_bond_mode: active-backup
infra_deploy_nic: eth0
infra_kvm01_control_address: 10.167.4.11
@@ -209,9 +209,17 @@
tenant_network_gateway: 10.167.6.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.167.6.0/24
- tenant_telemetry_enabled: 'False'
+ tenant_telemetry_enabled: 'True'
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
version: proposed
vnf_onboarding_enabled: 'False'
+ openstack_telemetry_address: 172.16.10.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.86
+ openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
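
Enabling `tenant_telemetry_enabled` brings three new telemetry nodes (mdb01–mdb03) into the model; the underlay changes below define their VMs and address offsets. Their environment-context entries are not shown in this diff, but assuming they follow the same pattern as the dns/mon nodes in these templates, each would look roughly like:

```
mdb01:
  reclass_storage_name: openstack_telemetry_node01
  roles:
  - openstack_telemetry
  - linux_system_codename_xenial
  interfaces:
    ens3:
      role: single_dhcp
    ens4:
      role: single_ctl
```
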
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
index 5c6a2f8..08a3c00 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
@@ -1,118 +1,19 @@
{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
index 758a86d..8531cc3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
@@ -31,6 +31,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
\ No newline at end of file
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
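Note: the telemetry macros run in the order Redis, Gnocchi, Panko, Ceilometer, Aodh; the assumption here is that Redis comes first because it acts as the coordination backend for Gnocchi and Ceilometer in these labs. MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) presumably also triggers the Nova cell-mapping step once the computes register; under that assumption the extra step would look roughly like:

    - description: Map compute hosts into the cell (illustrative; the real step lives in shared-openstack.yaml)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@nova:controller' cmd.run 'nova-manage cell_v2 discover_hosts --verbose'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false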
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
index a140743..b36f8be 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
@@ -29,6 +29,9 @@
{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -57,6 +60,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+70, -10]
@@ -82,6 +88,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+70, -10]
@@ -107,6 +116,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+10, -10]
@@ -132,6 +144,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+130, +230]
@@ -316,6 +331,93 @@
interfaces: *interfaces
network_config: *network_config
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
- name: {{ HOSTNAME_CMN01 }}
role: salt_minion
params:
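Note: the +84/+85/+86 values are offsets into each address pool, following the fuel-devops convention used throughout these underlays: default_{{ HOSTNAME_MDB01 }}: +84 reserves the 84th address of the pool's subnet for mdb01. A minimal sketch of the pattern, assuming the pool resolves to a /24 as elsewhere in this file:

    address_pools:
      private-pool01:
        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
        params:
          ip_reserved:
            default_{{ HOSTNAME_MDB01 }}: +84   # -> x.x.x.84 of the allocated subnet

Repeating the same offsets in all four pools gives each mdb node a predictable host part on every network.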
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
index 74a1465..c89ec89 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
@@ -169,4 +169,37 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
\ No newline at end of file
+ role: single_ctl
+
+ mdb01.cookied-mcp-pike-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.cookied-mcp-pike-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.cookied-mcp-pike-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
similarity index 75%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
index b860da9..0ad8daf 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
@@ -1,9 +1,12 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'True'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ cluster_domain: cookied-mcp-pike-dvr-ssl-barbican.local
+ cluster_name: cookied-mcp-pike-dvr-ssl-barbican
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -41,7 +44,8 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
- mcp_version: stable
+ maas_enabled: 'False'
+ mcp_version: proposed
offline_deployment: 'False'
opencontrail_enabled: 'False'
openstack_benchmark_node01_address: 172.16.10.95
@@ -103,7 +107,12 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
+ openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -150,34 +159,33 @@
salt_master_management_address: 192.168.10.90
shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_enabled: 'False'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
@@ -185,3 +193,32 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'False'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'False'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_barbican_address: 172.16.10.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 172.16.10.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 172.16.10.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 172.16.10.47
+ openstack_barbican_node03_hostname: kmn03
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
new file mode 100644
index 0000000..f704f65
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
@@ -0,0 +1,132 @@
+nodes:
+ cfg01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_vdb
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
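Note: cmp<<count>> is a template entry for the environment generator: <<count>> expands into per-node names, and since the cookiecutter context above sets compute_padding_with_zeros: False, the names come out unpadded (cmp1, cmp2), matching the HOSTNAME_CMP01/CMP02 defaults in the underlay. Under that assumption the generated nodes would look roughly like:

    cmp1.mcp-pike-dvr-ssl.local:
      reclass_storage_name: openstack_compute_node01
      # roles and interfaces copied from the openstack_compute_rack01 template above
    cmp2.mcp-pike-dvr-ssl.local:
      reclass_storage_name: openstack_compute_node02
      # roles and interfaces copied from the openstack_compute_rack01 template above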
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
new file mode 100644
index 0000000..f5a0013
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
new file mode 100644
index 0000000..627ed30
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
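Note: MACRO_INSTALL_DOGTAG() is placed before MACRO_INSTALL_BARBICAN() because the cookiecutter context selects barbican_backend: dogtag, so the Dogtag CA has to exist before Barbican is pointed at it. Assuming the macros follow the same shape as the other shared-openstack steps and the standard MCP formula state names, the expansion is roughly:

    - description: Install Dogtag server (sketch)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@dogtag:server' state.sls dogtag.server
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false

    - description: Install Barbican server (sketch)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@barbican:server' state.sls barbican.server
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false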
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
new file mode 100644
index 0000000..df13ee9
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other Salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
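Note: the "Temp fix" step installs reclass-tools into a virtualenv and injects cluster_internal_protocol: https into the system reclass class for the single cinder volume role, so cinder-volume reaches the internal endpoints over TLS. A quick way to confirm the key landed would be a follow-up step like this (an illustrative check, not part of the template):

    - description: Verify the injected reclass key
      cmd: grep -B2 'cluster_internal_protocol' /srv/salt/reclass/classes/system/cinder/volume/single.yml
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true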
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
index c64bdc8..e1befcb 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,23 +12,23 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl-barbican') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -43,11 +43,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -63,11 +63,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -83,11 +83,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+10, -10]
@@ -103,14 +103,13 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+130, +220]
groups:
- name: default
@@ -150,11 +149,10 @@
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
-
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -162,11 +160,8 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
@@ -298,11 +293,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_KMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -324,11 +319,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_KMN02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -350,11 +345,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON03 }}
+ - name: {{ HOSTNAME_KMN03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -419,6 +414,9 @@
- name: cinder
capacity: 50
format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -473,6 +471,9 @@
- name: cinder
capacity: 50
format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -488,8 +489,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
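Note: node sizing in these underlays always goes through the !os_env tag: the value comes from the named environment variable when it is set, otherwise from the literal default after the comma. The gateway bump above therefore reads as:

    vcpu: !os_env SLAVE_NODE_CPU, 4          # env override wins; default raised from 1 to 4
    memory: !os_env SLAVE_NODE_MEMORY, 4096  # default raised from 2048 to 4096 MiB

Since SLAVE_NODE_CPU and SLAVE_NODE_MEMORY are shared across roles, exporting either one overrides the kmn and gtw defaults alike.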
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index 89237be..2fdfc6b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -187,6 +187,14 @@
stacklight_version: '2'
stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
+ openstack_telemetry_address: 172.16.10.96
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.97
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.98
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.99
+ openstack_telemetry_node03_hostname: mdb03
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.1.0.0/24
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
index f2f7742..83998a7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -196,3 +196,36 @@
role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
+
+ mdb01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
index 4545ad4..f3d274a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
@@ -12,6 +12,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
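Note: MACRO_INSTALL_NGINX() is added to every core.yaml touched by this change. Assuming it follows the same shape as the neighbouring shared-core macros, it expands to roughly:

    - description: Install nginx
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@nginx:server' state.sls nginx
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false

In the SSL-enabled labs nginx fronts the control-plane services on the proxy nodes, which is presumably why it now belongs to the core install.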
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
index 5c930fc..c1e32ec 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
@@ -27,6 +27,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index 82d1b95..07cfef8 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -5,8 +5,6 @@
{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
index 509edbe..26456f7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -31,6 +31,9 @@
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
@@ -59,6 +62,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -86,6 +92,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -113,6 +122,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -140,6 +152,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -330,6 +345,93 @@
interfaces: *interfaces
network_config: *network_config
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
@@ -705,29 +807,3 @@
interfaces: *all_interfaces
network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index 622b371..64031ea 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -227,4 +227,9 @@
openstack_share_hostname: share
openstack_share_node01_hostname: share01
openstack_octavia_enabled: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
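Note: a quick consistency check on the new Octavia parameters; the health-manager bind IP and the whole amphora allocation pool must sit inside octavia_lb_mgmt_cidr, and here they do:

    octavia_lb_mgmt_cidr: 192.168.1.0/24                  # amphora management network
    octavia_hm_bind_ip: 192.168.1.12                      # inside the /24
    octavia_lb_mgmt_allocation_pool_start: 192.168.1.2    # pool spans .2-.200,
    octavia_lb_mgmt_allocation_pool_end: 192.168.1.200    #   also inside the /24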
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
index a6b2cc6..a39d636 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
@@ -14,6 +14,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
index 4a4dfac..06946d4 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
@@ -13,6 +13,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 39099ee..bfcc3fd 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -227,5 +227,13 @@
openstack_mysql_x509_enabled: 'False'
rabbitmq_ssl_enabled: 'False'
openstack_rabbitmq_x509_enabled: 'False'
- tenant_telemetry_enabled: 'False'
- gnocchi_aggregation_storage: file
+ tenant_telemetry_enabled: 'True'
+ gnocchi_aggregation_storage: ceph
+ openstack_telemetry_address: 172.16.10.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.86
+ openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
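Note: with tenant_telemetry_enabled flipped to 'True', gnocchi_aggregation_storage: ceph stores the aggregated metrics on the Ceph cluster this lab already deploys. The new mdb addresses also line up with the +84/+85/+86 offsets added to underlay.yaml below, assuming the control pool resolves to 172.16.10.0/24 as in these labs:

    openstack_telemetry_node01_address: 172.16.10.84   # default_{{ HOSTNAME_MDB01 }}: +84
    openstack_telemetry_node02_address: 172.16.10.85   # default_{{ HOSTNAME_MDB02 }}: +85
    openstack_telemetry_node03_address: 172.16.10.86   # default_{{ HOSTNAME_MDB03 }}: +86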
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
index fab7c18..fcce951 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
@@ -12,6 +12,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
index 5d19c16..636187b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -31,6 +31,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
index 727758e..fe31142 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
@@ -29,6 +29,9 @@
{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -57,6 +60,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+70, -10]
@@ -82,6 +88,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+70, -10]
@@ -107,6 +116,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+10, -10]
@@ -132,6 +144,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+130, +230]
@@ -313,6 +328,8 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
+
+
interfaces: *interfaces
network_config: *network_config
@@ -342,6 +359,93 @@
interfaces: *interfaces
network_config: *network_config
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+        # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
- name: {{ HOSTNAME_CMN02 }}
role: salt_minion
params:
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
index f279b44..28831da 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
@@ -169,4 +169,38 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
\ No newline at end of file
+ role: single_ctl
+
+
+ mdb01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
similarity index 75%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
index b860da9..1cec753 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
@@ -1,9 +1,12 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'True'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ cluster_domain: cookied-mcp-queens-dvr-ssl-barbican.local
+ cluster_name: cookied-mcp-queens-dvr-ssl-barbican
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -41,7 +44,8 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
- mcp_version: stable
+ maas_enabled: 'False'
+ mcp_version: proposed
offline_deployment: 'False'
opencontrail_enabled: 'False'
openstack_benchmark_node01_address: 172.16.10.95
@@ -103,7 +107,12 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
+ openstack_version: queens
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -150,34 +159,33 @@
salt_master_management_address: 192.168.10.90
shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_enabled: 'False'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
@@ -185,3 +193,32 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'False'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'False'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_barbican_address: 172.16.10.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 172.16.10.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 172.16.10.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 172.16.10.47
+ openstack_barbican_node03_hostname: kmn03
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
new file mode 100644
index 0000000..7102e9c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
@@ -0,0 +1,132 @@
+nodes:
+ cfg01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_vdb
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
new file mode 100644
index 0000000..4efe25c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
new file mode 100644
index 0000000..5a2bdac
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
new file mode 100644
index 0000000..edfadef
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
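
The temp-fix step above follows a pattern used throughout these templates: install reclass-tools into a throwaway virtualenv on cfg01, then patch the Reclass model in place with `add-key`. A hedged sketch of a follow-up check (not part of the template) to confirm the override landed:

```yaml
# Illustrative verification step, assuming the same step schema as above.
- description: Verify the cluster_internal_protocol override
  cmd: grep 'cluster_internal_protocol' /srv/salt/reclass/classes/system/cinder/volume/single.yml
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true   # informational only
```
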
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
index c64bdc8..c7dd479 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,23 +12,23 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ssl-barbican') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -43,11 +43,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -63,11 +63,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -83,11 +83,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+10, -10]
@@ -103,14 +103,10 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+130, +220]
groups:
- name: default
@@ -150,11 +146,10 @@
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
-
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -162,11 +157,8 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
@@ -298,11 +290,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_KMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -324,11 +316,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_KMN02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -350,11 +342,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON03 }}
+ - name: {{ HOSTNAME_KMN03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -419,6 +411,9 @@
- name: cinder
capacity: 50
format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -473,6 +468,9 @@
- name: cinder
capacity: 50
format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -488,8 +486,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
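
Note the `!os_env NAME, default` YAML tag used for node sizing above: it reads the named environment variable and falls back to the literal after the comma, so per-run overrides need no template edits. A minimal sketch of the pattern as these underlay templates use it:

```yaml
# Sketch of a node definition with env-driven sizing (values from this file).
- name: {{ HOSTNAME_KMN01 }}
  role: salt_minion
  params:
    vcpu: !os_env SLAVE_NODE_CPU, 2           # $SLAVE_NODE_CPU if set, else 2
    memory: !os_env SLAVE_NODE_MEMORY, 16384  # $SLAVE_NODE_MEMORY if set, else 16384
```
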
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
index 8c55d51..dcc8bc5 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
@@ -187,6 +187,14 @@
stacklight_version: '2'
stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
+ openstack_telemetry_address: 172.16.10.96
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.97
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.98
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.99
+ openstack_telemetry_node03_hostname: mdb03
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.1.0.0/24
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
index cc6140a..b1c7e3d 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
@@ -196,3 +196,36 @@
role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
+
+ mdb01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
index 8a48f49..e10bccc 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
@@ -12,6 +12,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
index 64a3ce0..278b78b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
@@ -27,6 +27,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
index 8efefc3..7d65097 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
@@ -5,8 +5,6 @@
{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
index f39e2a3..f6a8998 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
@@ -31,6 +31,9 @@
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
@@ -59,6 +62,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -86,6 +92,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -113,6 +122,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -140,6 +152,9 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
default_{{ HOSTNAME_SHARE01 }}: +204
@@ -330,6 +345,93 @@
interfaces: *interfaces
network_config: *network_config
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
@@ -705,29 +807,3 @@
interfaces: *all_interfaces
network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
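
A note on the address-pool arithmetic used above: entries such as `default_{{ HOSTNAME_MDB01 }}: +97` assign an offset from the start of the pool's network, while `ip_ranges` entries may offset from either end. A hedged sketch (the concrete addresses depend on the pool's subnet):

```yaml
# Illustrative offsets, assuming a /24 pool: +97 would resolve to .97,
# and -10 counts back from the end of the subnet.
default_{{ HOSTNAME_MDB01 }}: +97
default_{{ HOSTNAME_MDB02 }}: +98
default_{{ HOSTNAME_MDB03 }}: +99
ip_ranges:
  dhcp: [+90, -10]   # DHCP range from .90 up to 10 addresses before the end
```
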
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
index bb1185e..293863a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
@@ -12,6 +12,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
index c8875a4..414187b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
@@ -5,8 +5,6 @@
{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
index 6b36603..739c58c 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
@@ -12,6 +12,8 @@
{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
index 07eb3af..5ab3fd0 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
@@ -5,8 +5,6 @@
{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
new file mode 100644
index 0000000..0c7d928
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
@@ -0,0 +1,55 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-queens-dvr-sl' %}
+# Name of the context file used to render the Environment model (without the extension, which is always .yaml)
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-queens-dvr-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # Workaround for the missing reclass.system class for the dns role
+ # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index f50fe32..7adb184 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -4,7 +4,7 @@
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-dvr' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 8fad4d1..0d0bd6b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -4,7 +4,7 @@
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-ovs' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-ovs' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
new file mode 100644
index 0000000..4b86b85
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl-barbican' %}
+# Name of the context file used to render the Environment model (without the extension, which is always .yaml)
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
new file mode 100644
index 0000000..4e5dbc9
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ssl-barbican' %}
+# Name of the context file used to render the Environment model (without the extension, which is always .yaml)
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index b231ced..e2b63dd 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -72,7 +72,7 @@
{%- if os_env('IRONIC_DNSMASQ_HOSTFILE', '') %}
- echo "dhcp-hostsfile=/var/lib/libvirt/dnsmasq/{{ IRONIC_ENV_NAME }}.hostsfile" >> /etc/dnsmasq.conf
- - service dnsmasq restart
+ - service dnsmasq restart && sleep 30
{%- endif %}
# Enable SNAT to allow internet access for deploying nodes using ironic node as a gateway
diff --git a/tcp_tests/templates/shared-ceph.yaml b/tcp_tests/templates/shared-ceph.yaml
index 267e407..ab13cb2 100644
--- a/tcp_tests/templates/shared-ceph.yaml
+++ b/tcp_tests/templates/shared-ceph.yaml
@@ -111,14 +111,14 @@
{%- endmacro %}
{%- macro CONNECT_CEPH_TO_SERVICES() %}
-- description: Connect ceph to glance
+- description: Setup keyring for glance
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
-- description: Connect ceph to cinder and nova
+- description: Setup keyring for cinder and nova
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
@@ -126,4 +126,13 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
+
+- description: Setup keyring for gnocchi
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@gnocchi:server' match.pillar 'ceph:common' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@gnocchi:server' state.sls ceph.common,ceph.setup.keyring
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{%- endmacro %}
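
The gnocchi step above is guarded: `match.pillar` is evaluated against the same compound target first, so the keyring states run only on labs that actually deploy gnocchi. In these compound (`-C`) targets, `I@key:value` matches on pillar data and `and` intersects the matching minion sets. An illustrative pre-check (not part of the macro):

```yaml
# Sketch: list the minions a compound target would hit before applying states.
- description: Show minions matching the gnocchi keyring target
  cmd: salt --hard-crash -C 'I@ceph:common and I@gnocchi:server' test.ping
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```
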
diff --git a/tcp_tests/templates/shared-core.yaml b/tcp_tests/templates/shared-core.yaml
index 49eadac..ae47e03 100644
--- a/tcp_tests/templates/shared-core.yaml
+++ b/tcp_tests/templates/shared-core.yaml
@@ -76,9 +76,16 @@
- description: Setup glusterfs on primary controller
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+ -C 'I@glusterfs:server:role:primary' state.sls glusterfs.server.setup -b 1
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 5}
skip_fail: false
- description: Check the gluster status
@@ -88,6 +95,20 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Refresh pillar before glusterfs client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
{%- endmacro %}
{%- macro MACRO_INSTALL_RABBITMQ() %}
@@ -165,6 +186,24 @@
{%- endmacro %}
+{%- macro MACRO_INSTALL_NGINX() %}
+
+- description: Update certificate files on nginx nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Install nginx server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
{%- macro MACRO_INSTALL_MEMCACHED() %}
- description: Install memcached on all controllers
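
The glusterfs setup steps above pass `-b 1` (salt's batch size), which applies the state to one matching minion at a time; these templates use it wherever cluster bootstrap order matters (glusterfs, galera). A sketch of the same step shape with the batching made explicit:

```yaml
# -b 1 serializes the run: each minion finishes before the next one starts.
- description: Set up glusterfs on the primary server, one minion at a time
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server:role:primary' state.sls glusterfs.server.setup -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 5, delay: 5}
  skip_fail: false
```
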
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index 01de21d..b48a611 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -40,20 +40,6 @@
retry: {count: 1, delay: 15}
skip_fail: false
-- description: Mount glusterfs.client volumes (resuires created 'keystone' system user)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls glusterfs.client -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Populate keystone services/tenants/admins
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
@@ -85,13 +71,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Mount glusterfs.client volumes (resuires created 'glusterfs' system user)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Check glance image-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C "I@keystone:server" cmd.run ". /root/keystonercv3;
@@ -103,13 +82,6 @@
{%- macro MACRO_INSTALL_NOVA() %}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Install nova service on primary node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C "I@nova:controller and *01*" state.sls nova.controller
@@ -174,11 +146,11 @@
skip_fail: false
{%- if INSTALL_VOLUME %}
-- description: Install cinder volume
+- description: Install cinder volume (retry count raised to 2, see PROD-24485)
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:volume' state.sls cinder
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
{%- endif %}
@@ -285,7 +257,27 @@
{%- endmacro %}
{%- macro MACRO_INSTALL_BARBICAN() %}
-# TO DO
+
+- description: Install barbican server
+ cmd: |
+ salt -C 'I@barbican:server:role:primary' state.sls barbican.server;
+ salt -C 'I@barbican:server' state.sls barbican.server;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_DOGTAG() %}
+
+- description: Install dogtag server
+ cmd: |
+ salt -C 'I@dogtag:server:role:master' state.sls dogtag.server;
+ salt -C 'I@dogtag:server' state.sls dogtag.server;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{%- endmacro %}
{%- macro MACRO_INSTALL_IRONIC() %}
@@ -340,6 +332,19 @@
{%- endmacro %}
{%- macro MACRO_INSTALL_OCTAVIA_API() %}
+
+- description: Execute glance client to upload octavia image
+ cmd: salt -C 'I@glance:client' state.sls glance.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Execute neutron client to create octavia resources
+ cmd: salt -C 'I@neutron:client' state.sls neutron.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Install octavia api service on primary node
cmd: salt -C 'I@octavia:api:role:primary' state.sls octavia.api
node_name: {{ HOSTNAME_CFG01 }}
@@ -387,10 +392,6 @@
{%- endmacro %}
-{%- macro MACRO_INSTALL_DOGTAG() %}
-# TO DO
-{%- endmacro %}
-
{%- macro MACRO_INSTALL_COMPUTE(CELL_MAPPING=false) %}
# Install compute node
- description: Apply formulas for compute node
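
These shared macros take Jinja keyword arguments with defaults, which is how per-lab templates toggle optional steps (for example `MACRO_INSTALL_COMPUTE(CELL_MAPPING=true)` in the openstack.yaml files above). A minimal sketch of the pattern, mirroring the `INSTALL_VOLUME` guard earlier in this file:

```yaml
{%- macro MACRO_INSTALL_COMPUTE(CELL_MAPPING=false) %}
# ... unconditional compute steps ...
{%- if CELL_MAPPING %}
# ... extra step(s) emitted only when the caller passes CELL_MAPPING=true ...
{%- endif %}
{%- endmacro %}
```
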
diff --git a/tcp_tests/templates/shared-sl.yaml b/tcp_tests/templates/shared-sl.yaml
index 413162b..34c42f0 100644
--- a/tcp_tests/templates/shared-sl.yaml
+++ b/tcp_tests/templates/shared-sl.yaml
@@ -67,14 +67,14 @@
skip_fail: false
{%- endmacro %}
-{%- macro MACRO_INSTALL_GLUSTERFS_CLIENT() %}
-- description: Install glusterfs client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:client' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-{%- endmacro %}
+#{#%- macro MACRO_INSTALL_GLUSTERFS_CLIENT() %#}
+#- description: Install glusterfs client
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@glusterfs:client' state.sls glusterfs.client
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 2, delay: 15}
+# skip_fail: false
+#{#%- endmacro %#}
{%- macro MACRO_INSTALL_MONGODB() %}
# Install slv2 infra
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
deleted file mode 100644
index 9d07fb0..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
deleted file mode 100644
index 85afc89..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
deleted file mode 100644
index 72bd92a..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
index 7ac814c..3a24e5e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
@@ -1,118 +1,12 @@
{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
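
(For reference: each SHARED_CORE macro wraps the same step sequence that was previously inlined in every template. A minimal sketch of what MACRO_INSTALL_HAPROXY plausibly expands to in shared-core.yaml — the actual definition lives in that file and may differ, e.g. by also restarting rsyslog:)

```
{%- macro MACRO_INSTALL_HAPROXY() %}
- description: Install haproxy
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@haproxy:proxy' state.sls haproxy
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Check haproxy status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@haproxy:proxy' service.status haproxy
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}
```
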
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
index 965d297..5c35319 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
@@ -1,118 +1,12 @@
{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
index 3ec4687..a2d4be8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
@@ -1,118 +1,12 @@
{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
index 57968ee..49b016a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
index cbde3f0..5c35319 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
index c0e9b0e..3aed7e6 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
index 368f2bf..cf4a90a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
index abaa50d..af8778d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/core.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
index 938b11f..42f7e52 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
index 8ba7026..0a43183 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
@@ -1,5 +1,6 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
- description: remove apparmor
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -8,118 +9,11 @@
retry: {count: 1, delay: 10}
skip_fail: true
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
index 6546438..68a1220 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
@@ -5,14 +5,14 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
# Install OpenStack control services
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
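
(Illustration: with the default REPOSITORY_SUITE of 'proposed', the new DOCKER_LOCAL_REPO default renders to the apt source line below; the repository suite moves from the apt distribution field into the mirror path.)

```
deb [arch=amd64] http://mirror.mcp.mirantis.local.test/proposed/docker/xenial xenial stable
```
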
{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
index 9f28ba9..fc45c30 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
@@ -12,9 +12,27 @@
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
- # Upload cirros image
+- description: Run 'openssh' formula on cfg01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls openssh &&
+ salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh reload"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Configure openssh on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
+ salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh reload"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Upload cirros image on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
@@ -88,7 +106,7 @@
skip_fail: false
- description: Install docker-ce on gtw
- cmd: salt-call cmd.run 'apt-get install docker-ce -y'
+ cmd: salt-call cmd.run 'apt-get install docker-ce -y --allow-unauthenticated'
node_name: {{ HOSTNAME_GTW01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -102,13 +120,13 @@
skip_fail: false
- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
+ cmd: scp -o StrictHostKeyChecking=no ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
+ cmd: scp -o StrictHostKeyChecking=no /root/keystonercv3 gtw01:/root
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
index 420b805..bb1316a 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
@@ -37,9 +37,9 @@
export SALT_REPOSITORY="deb [arch=amd64] http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/saltstack-${SALT_VERSION}/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
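
(Illustration: with, e.g., REPOSITORY_SUITE=proposed, the updated exports render to the following sources.list entries — note the mirror host changes from mirror.mcp.mirantis.local.test to mirror.mirantis.local.test:)

```
deb http://mirror.mirantis.local.test/proposed/ubuntu xenial main universe restricted
deb http://mirror.mirantis.local.test/proposed/ubuntu xenial-updates main restricted universe
deb http://mirror.mirantis.local.test/proposed/ubuntu xenial-security main restricted universe
```
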
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index d671337..828a14f 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -18,6 +18,7 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_VS with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
@@ -40,6 +41,7 @@
retry: {count: 1, delay: 10}
skip_fail: false
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VS) }}
{{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VS, VSWITCH_IP) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
index 705e9be..df9fd73 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
@@ -1,6 +1,21 @@
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+{% set OPENSTACK_PIKE_REPOSITORY = os_env('OPENSTACK_PIKE_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE + "/openstack-pike/xenial/ xenial main") %}
+{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
+{% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
+
{%- macro MACRO_CONFIGURE_VSWITCH(NODE_NAME, IP) %}
{#################################################}
+- description: 'Enable the OpenStack repository for required packages'
+ cmd: |
+ apt-key adv --keyserver "{{ UBUNTU_KEY_SERVER }}" --recv-keys "{{ UBUNTU_KEY_ID }}"
+ echo "{{ OPENSTACK_PIKE_REPOSITORY }}" > /etc/apt/sources.list.d/openstack.list
+ eatmydata apt-get clean;
+ apt-get update;
+ sync;
+ node_name: {{ NODE_NAME }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
- description: 'Install openvswitch-vtep package and configure it'
cmd: |
@@ -8,9 +23,9 @@
ifconfig ens4 up
apt-get update
- apt-get -y install openvswitch-switch
+ apt-get -y install openvswitch-switch --allow-unauthenticated
service openvswitch-switch stop
- apt-get -y install openvswitch-vtep bridge-utils
+ apt-get -y install openvswitch-vtep bridge-utils --allow-unauthenticated
ovsdb-tool create /etc/openvswitch/vtep.db /usr/share/openvswitch/vtep.ovsschema
ovsdb-tool create /etc/openvswitch/vswitch.db /usr/share/openvswitch/vswitch.ovsschema
@@ -67,7 +82,13 @@
skip_fail: false
- description: 'Refresh pillar data after L2GW enablement'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 15
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: 'Sync all'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -75,7 +96,7 @@
- description: 'Check L2GW is enabled'
cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep True
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
{%- endmacro %}
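
The last hunk raises the retry count for the L2GW pillar check from 1 to 3. As a rough sketch of the retry semantics these YAML steps imply (count attempts, fixed delay, optional skip_fail) — not the framework's actual step runner:

```
import time

def run_step(cmd_fn, count=3, delay=5, skip_fail=False):
    # Re-run a step up to `count` times, sleeping `delay` seconds
    # between attempts; swallow the final failure only if skip_fail.
    for attempt in range(1, count + 1):
        try:
            return cmd_fn()
        except Exception:
            if attempt == count:
                if skip_fail:
                    return None
                raise
            time.sleep(delay)
```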
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
index b3a1404..4d69c89 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
@@ -1,5 +1,7 @@
{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
- description: remove apparmor
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
'*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -7,118 +9,11 @@
retry: {count: 1, delay: 10}
skip_fail: true
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
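
The per-lab step lists above are collapsed into macros imported from shared-core.yaml. A toy rendering of one such macro with jinja2 shows the mechanism (the macro body here is a simplified stand-in, not the real shared-core.yaml content; assumes the jinja2 package):

```
from jinja2 import Environment

shared_core = """
{%- macro MACRO_INSTALL_MEMCACHED() %}
- description: Install memcached on all controllers
  cmd: salt -C 'I@memcached:server' state.sls memcached
  node_name: cfg01.mcp.local
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}
{{ MACRO_INSTALL_MEMCACHED() }}
"""

# Rendering expands the macro call into the plain YAML step list.
print(Environment().from_string(shared_core).render())
```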
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 25fd59f..d362573 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -6,12 +6,13 @@
{% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs') %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
+
# Install OpenStack control services
{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
index d1e0380..a33e90f 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
@@ -37,9 +37,9 @@
export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-ssl/core.yaml b/tcp_tests/templates/virtual-offline-ssl/core.yaml
index 0c75bb4..c08b0cd 100644
--- a/tcp_tests/templates/virtual-offline-ssl/core.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/core.yaml
@@ -1,5 +1,7 @@
{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
- description: remove apparmor
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
'*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -7,118 +9,11 @@
retry: {count: 1, delay: 10}
skip_fail: true
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
index 23541d0..e84ed31 100644
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
@@ -15,8 +15,7 @@
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
-
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
# Install OpenStack control services
diff --git a/tcp_tests/templates/virtual-offline-ssl/run_test.sh b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
index 1695eae..747f959 100755
--- a/tcp_tests/templates/virtual-offline-ssl/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
@@ -35,10 +35,11 @@
export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
+#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
index 202a7e2..11a7665 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/tests/system/test_3rdparty_suites.py b/tcp_tests/tests/system/test_3rdparty_suites.py
index d545532..78583af 100644
--- a/tcp_tests/tests/system/test_3rdparty_suites.py
+++ b/tcp_tests/tests/system/test_3rdparty_suites.py
@@ -33,7 +33,7 @@
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_tempest
def test_run_tempest(self, tempest_actions, show_step, _):
- """Runner for Juniper contrail-tests
+ """Runner for Openstack tempest tests
Scenario:
1. Run tempest
@@ -83,11 +83,16 @@
k8s_actions.run_conformance()
@pytest.mark.grab_versions
+ @pytest.mark.extract(container_system='docker',
+ extract_from='mirantis/virtlet',
+ files_to_extract=['conformance_virtlet_result.xml'])
@pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
- 'report.xml'])
+ 'conformance_virtlet_result.xml'])
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.k8s_conformance_virtlet
def test_run_k8s_conformance_virtlet(self, show_step, config, k8s_actions,
k8s_logs, _):
"""Test run of k8s virtlet conformance tests"""
- k8s_actions.run_virtlet_conformance()
+ config.k8s.run_extended_virtlet_conformance = True
+ k8s_actions.run_virtlet_conformance(
+ report_name="conformance_virtlet_result.xml")
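
The new extract marker tells the results-collection machinery which files to pull out of which container. A hedged sketch of how a fixture could read it (get_closest_marker is the pytest>=3.6 marker API; the fixture name and return shape are illustrative, not the repo's actual code):

```
import pytest

@pytest.fixture
def extract_spec(request):
    # Read the kwargs of the @pytest.mark.extract marker on the test,
    # e.g. {'container_system': 'docker', 'extract_from': ...}.
    marker = request.node.get_closest_marker('extract')
    return dict(marker.kwargs) if marker else None
```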
diff --git a/tcp_tests/tests/system/test_failover_k8s.py b/tcp_tests/tests/system/test_failover_k8s.py
index a334a42..b872c36 100644
--- a/tcp_tests/tests/system/test_failover_k8s.py
+++ b/tcp_tests/tests/system/test_failover_k8s.py
@@ -21,7 +21,7 @@
class TestFailoverK8s(object):
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_k8s_master_vip_migration(self, show_step, k8s_deployed, underlay,
k8s_actions, core_actions,
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index 8066cd9..ba5a81d 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -106,7 +106,11 @@
# todo (tleontovich) add asserts here and extend the tests
# with acceptance criteria
show_step(10)
+
# Run SL component tests
+ stacklight_deployed.setup_sl_functional_tests(
+ 'cfg01',
+ )
stacklight_deployed.run_sl_functional_tests(
'cfg01',
'/root/stacklight-pytest/stacklight_tests/',
@@ -163,6 +167,9 @@
stacklight_deployed.check_prometheus_targets(mon_nodes)
show_step(6)
# Run SL component tests
+ stacklight_deployed.setup_sl_functional_tests(
+ 'cfg01',
+ )
stacklight_deployed.run_sl_functional_tests(
'cfg01',
'/root/stacklight-pytest/stacklight_tests/',
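
The change wires a setup call in front of every StackLight run so the test payload is installed on the node before execution. The resulting call order, wrapped in an illustrative helper (method names are taken from the diff; the wrapper itself is hypothetical):

```
def run_stacklight_suite(stacklight_deployed, node='cfg01'):
    # 1. prepare stacklight-pytest on the node, 2. execute the alert
    # tests, 3. pull the junit report back for archiving.
    stacklight_deployed.setup_sl_functional_tests(node)
    stacklight_deployed.run_sl_functional_tests(
        node,
        '/root/stacklight-pytest/stacklight_tests/',
        'tests/prometheus',
        'test_alerts.py')
    stacklight_deployed.download_sl_test_report(
        node,
        '/root/stacklight-pytest/stacklight_tests/report.xml')
```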
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 7e726cb..a34496b 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -394,7 +394,7 @@
result)
# Prepare resources before test
- steps_path = config.openstack_deploy.penstack_resources_steps_path
+ steps_path = config.openstack_deploy.openstack_resources_steps_path
commands = underlay.read_template(steps_path)
openstack_actions.install(commands)
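
The fixed attribute name is then used to load and execute the resource-preparation steps. A sketch of that flow as a standalone function (names come straight from the diff; read_template/install semantics are assumed):

```
def prepare_openstack_resources(config, underlay, openstack_actions):
    # Load the YAML steps referenced by the (now correctly spelled)
    # config attribute and execute them against the deployment.
    steps_path = config.openstack_deploy.openstack_resources_steps_path
    commands = underlay.read_template(steps_path)
    openstack_actions.install(commands)
```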
diff --git a/tcp_tests/tests/system/test_install_mcp_queens.py b/tcp_tests/tests/system/test_install_mcp_queens.py
new file mode 100644
index 0000000..b644d26
--- /dev/null
+++ b/tcp_tests/tests/system/test_install_mcp_queens.py
@@ -0,0 +1,220 @@
+# Copyright 2018 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+class TestMcpInstallQueensCeph(object):
+ """Test class for testing mcp queens ceph deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr_ceph
+ def test_cookied_mcp_queens_dvr_ceph(self, underlay,
+ openstack_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensOvs(object):
+ """Test class for testing mcp queens ovs deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_ovs
+ def test_cookied_mcp_queens_ovs(self, underlay,
+ openstack_deployed,
+ stacklight_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_ovs
+ def test_cookied_mcp_queens_ovs_sl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run stacklight tests
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # Run SL component tests
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensDvr(object):
+ """Test class for testing mcp queens dvr deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr
+ def test_cookied_mcp_queens_dvr(self, underlay,
+ openstack_deployed,
+ stacklight_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr
+ def test_cookied_mcp_queens_dvr_sl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run stacklight tests
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # Run SL component tests
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensDvrSsl(object):
+ """Test class for testing mcp queens dvr deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr_ssl
+ def test_cookied_mcp_queens_dvr_ssl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr_ssl
+ def test_cookied_mcp_queens_dvr_ssl_sl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run stacklight tests
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # Run SL component tests
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 612367c..6467a8a 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -22,6 +22,7 @@
from tcp_tests import settings
from tcp_tests.managers.k8s import read_yaml_file
+from tcp_tests.managers.jenkins.client import JenkinsClient
LOG = logger.logger
@@ -94,7 +95,7 @@
show_step(1)
k8s_deployed.start_k8s_cncf_verification()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
k8s_chain_update_log_helper):
@@ -141,8 +142,9 @@
show_step(8)
sample.delete()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_metallb
def test_k8s_metallb(self, show_step, config, k8s_deployed):
"""Enable metallb in cluster and do basic tests
@@ -157,7 +159,7 @@
8. Delete deployments
"""
show_step(1)
- if not config.k8s_deploy.kubernetes_metallb_enabled:
+ if not k8s_deployed.is_metallb_enabled:
pytest.skip("Test requires metallb addon enabled")
show_step(2)
@@ -195,7 +197,7 @@
for sample in samples:
sample.delete()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.k8s_genie
def test_k8s_genie_flannel(self, show_step, config,
@@ -314,8 +316,9 @@
multicni_pod.delete()
nocni_pod.delete()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_dashboard
def test_k8s_dashboard(self, show_step, config,
salt_deployed, k8s_deployed):
"""Test dashboard setup
@@ -389,8 +392,9 @@
for namespace in dashboard_namespaces:
assert namespace['objectMeta']['name'] in namespaces_names_list
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_ingress_nginx
def test_k8s_ingress_nginx(self, show_step, config,
salt_deployed, k8s_deployed):
"""Test ingress-nginx configured and working with metallb
@@ -405,9 +409,9 @@
6. Try to reach test1 and test2 deployment services endpoints
"""
show_step(1)
- if not config.k8s_deploy.kubernetes_metallb_enabled:
+ if not k8s_deployed.is_metallb_enabled:
pytest.skip("Test requires metallb addon enabled")
- if not config.k8s_deploy.kubernetes_ingressnginx_enabled:
+ if not k8s_deployed.is_ingress_nginx_enabled:
pytest.skip("Test requires ingress-nginx addon enabled")
show_step(2)
@@ -459,3 +463,41 @@
req2 = requests.get(ingress_address + "/test2", verify=False)
assert req2.status_code == 200
assert 'dep-ingress-2' in req2.text
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_k8s_cicd_upgrade(self, show_step, config,
+ salt_deployed, k8s_deployed):
+ """Test k8s upgrade cicd pipeline
+
+ Scenario:
+ 1. Setup Kubernetes+CICD cluster
+ 2. Start deploy-k8s-upgrade job in jenkins
+ 3. Wait for job to end
+ """
+ show_step(1)
+ jenkins_info = salt_deployed.get_pillar(
+ tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]
+
+ salt_api = salt_deployed.get_pillar(
+ tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]
+
+ show_step(2)
+ jenkins = JenkinsClient(
+ host='http://{host}:{port}'.format(**jenkins_info),
+ username=jenkins_info['username'],
+ password=jenkins_info['password'])
+
+ params = jenkins.make_defults_params('deploy-k8s-upgrade')
+ params['SALT_MASTER_URL'] = salt_api
+ params['SALT_MASTER_CREDENTIALS'] = 'salt'
+ params['CONFORMANCE_RUN_AFTER'] = True
+ params['CONFORMANCE_RUN_BEFORE'] = True
+ build = jenkins.run_build('deploy-k8s-upgrade', params)
+
+ show_step(3)
+ jenkins.wait_end_of_build(
+ name=build[0], build_id=build[1], timeout=3600 * 4)
+ result = jenkins.build_info(
+ name=build[0], build_id=build[1])['result']
+ assert result == 'SUCCESS', "k8s upgrade job has failed"
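
wait_end_of_build blocks until Jenkins finishes the job. A sketch of the polling loop it presumably implements, reusing the build_info call seen above (the 'building' field and the poll interval are assumptions):

```
import time

def wait_end_of_build(jenkins, name, build_id, timeout=4 * 3600,
                      interval=30):
    # Poll the build until Jenkins reports it is no longer running.
    deadline = time.time() + timeout
    while time.time() < deadline:
        info = jenkins.build_info(name=name, build_id=build_id)
        if not info.get('building'):
            return info
        time.sleep(interval)
    raise RuntimeError('%s #%s did not finish within %ss'
                       % (name, build_id, timeout))
```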
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 05a8deb..6c083cb 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -430,13 +430,7 @@
cmd='salt "*" ssh.set_auth_key ubuntu '
'"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
- salt_nodes = salt_deployed.get_ssh_data()
- nodes_list = \
- [node for node in salt_nodes
- if not any(node['node_name'] == n['node_name']
- for n in config.underlay.ssh)]
- config.underlay.ssh = config.underlay.ssh + nodes_list
- underlay.add_config_ssh(nodes_list)
+ salt_deployed.update_ssh_data_from_minions()
time.sleep(120) # debug sleep
cmd = "salt '*' test.ping"
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 83fd33a..d3b6c27 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -25,6 +25,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_virtlet
def test_virtlet_create_delete_vm(self, show_step, config, k8s_deployed):
"""Test for deploying an mcp environment with virtlet
@@ -51,6 +52,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_virtlet
def test_vm_resource_quotas(self, show_step, config, k8s_deployed):
"""Test for deploying a VM with specific quotas