Merge "Revert "[tcp-qa] add 'test_mcp_os_newton_install'""
diff --git a/README.md b/README.md
index 0f0ebd4..fdf0ebb 100644
--- a/README.md
+++ b/README.md
@@ -35,8 +35,8 @@
LAB_CONFIG_NAME variable maps cluster name from the model repository with
the set of templates in the ./tcp_tests/templates/ folder.
```
-export LAB_CONFIG_NAME=virtual-mcp-ocata-dvr # OVS-DVR with ocata packages
-export LAB_CONFIG_NAME=virtual-mcp-ocata-ovs # OVS-NO-DVR with ocata packages
+export LAB_CONFIG_NAME=cookied-mcp-ocata-dvr # OVS-DVR with ocata packages
+export LAB_CONFIG_NAME=cookied-mcp-ocata-ovs # OVS-NO-DVR with ocata packages
export LAB_CONFIG_NAME=virtual-mcp-ocata-cicd # Operational Support System Tools
export LAB_CONFIG_NAME=virtual-mcp11-dvr # OVS-DVR with neutron packages
export LAB_CONFIG_NAME=virtual-mcp11-ovs # OVS-NO-DVR with neutron packages
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 3e96c84..9ceea67 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -21,32 +21,30 @@
stage("Install core infrastructure and deploy CICD nodes") {
// steps: env.DRIVETRAIN_STACK_INSTALL
- shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT)
}
stage("Deploy platform components") {
// steps: env.PLATFORM_STACK_INSTALL
- shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
}
currentBuild.result = 'SUCCESS'
} catch (e) {
- common.printMsg("Deploy is failed: " + e.message , "red")
+ common.printMsg("Deploy is failed: " + e.message , "purple")
+ report_text = e.message
+ def snapshot_name = "deploy_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} deploy_failed || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
- } else {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
}
- report_text = e.message
+ shared.devops_snapshot_info(snapshot_name)
throw e
} finally {
shared.create_deploy_result_report(steps, currentBuild.result, report_text)
@@ -60,22 +58,19 @@
}
} catch (e) {
- common.printMsg("Tests are failed: " + e.message, "red")
+ common.printMsg("Tests are failed: " + e.message, "purple")
+ def snapshot_name = "tests_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} tests_failed || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
- throw e
- } finally {
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
- } else {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
}
+ shared.devops_snapshot_info(snapshot_name)
+ throw e
}
}
@@ -83,13 +78,32 @@
throttle(['fuel_devops_environment']) {
node ("${NODE_NAME}") {
try {
+ // run deploy stages
deploy(shared, common, steps)
+ // run test stages
test(shared, common, steps)
} catch (e) {
- common.printMsg("Job is failed: " + e.message, "red")
+ common.printMsg("Job is failed: " + e.message, "purple")
throw e
} finally {
- shared.swarm_testrail_report(steps)
+ // shutdown the environment if required
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+
+ stage("Archive all xml reports") {
+ archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
+ }
+ stage("report results to testrail") {
+ shared.swarm_testrail_report(steps)
+ }
+ stage("Store TestRail reports to job description") {
+ def String description = readFile("description.txt")
+ currentBuild.description += "\n${description}"
+ }
+
}
}
}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index f4c8765..36ea29a 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -24,6 +24,8 @@
@Library('tcp-qa')_
+import groovy.xml.XmlUtil
+
common = new com.mirantis.mk.Common()
shared = new com.mirantis.system_qa.SharedPipeline()
@@ -38,75 +40,90 @@
error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
dir("${PARENT_WORKSPACE}") {
- try {
- stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
- println "Remove environment ${ENV_NAME}"
- shared.run_cmd("""\
- dos.py erase ${ENV_NAME} || true
- """)
- println "Remove config drive ISO"
- shared.run_cmd("""\
- rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
- """)
- }
+ stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+ println "Remove environment ${ENV_NAME}"
+ shared.run_cmd("""\
+ dos.py erase ${ENV_NAME} || true
+ """)
+ println "Remove config drive ISO"
+ shared.run_cmd("""\
+ rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+ """)
+ }
- stage("Create an environment ${ENV_NAME} in disabled state") {
- // deploy_hardware.xml
- shared.run_cmd("""\
- export ENV_NAME=${ENV_NAME}
- export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
- export MANAGER=devops
- export PYTHONIOENCODING=UTF-8
- export REPOSITORY_SUITE=${MCP_VERSION}
- export TEST_GROUP=test_create_environment
- py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
- """)
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
}
+ }
- stage("Generate the model") {
- shared.generate_cookied_model()
- }
+ stage("Create an environment ${ENV_NAME} in disabled state") {
+ // deploy_hardware.xml
+ shared.run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export MANAGER=devops
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export TEST_GROUP=test_create_environment
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+ """)
+ }
- stage("Generate config drive ISO") {
- shared.generate_configdrive_iso()
- }
+ stage("Generate the model") {
+ shared.generate_cookied_model()
+ }
- stage("Upload generated config drive ISO into volume on cfg01 node") {
- shared.run_cmd("""\
- # Get SALT_MASTER_HOSTNAME to determine the volume name
- . ./tcp_tests/utils/env_salt
- virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
- virsh pool-refresh --pool default
- """)
- }
+ stage("Generate config drive ISO") {
+ shared.generate_configdrive_iso()
+ }
- stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ shared.run_cmd("""\
+ # Get SALT_MASTER_HOSTNAME to determine the volume name
+ . ./tcp_tests/utils/env_salt
+ virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+ virsh pool-refresh --pool default
+ """)
+ }
+
+ stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ def xml_report_name = "deploy_salt.xml"
+ try {
// deploy_salt.xml
- shared.run_cmd("""\
+ shared.run_sh("""\
export ENV_NAME=${ENV_NAME}
export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
export MANAGER=devops
export SHUTDOWN_ENV_ON_TEARDOWN=false
- export BOOTSTRAP_TIMEOUT=900
+ export BOOTSTRAP_TIMEOUT=1800
export PYTHONIOENCODING=UTF-8
export REPOSITORY_SUITE=${MCP_VERSION}
export TEST_GROUP=test_bootstrap_salt
- py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_salt.xml -k \${TEST_GROUP}
- sleep 60 # wait for jenkins to start and IO calm down
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
""")
- }
+                // Wait for Jenkins to start and for IO to calm down
+ sleep(60)
- } catch (e) {
- common.printMsg("Job is failed: " + e.message, "red")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- // TODO(ddmitriev): add checks for salt cluster
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ } catch (e) {
+ common.printMsg("Saltstack cluster deploy is failed", "purple")
+ if (fileExists(xml_report_name)) {
+ shared.download_logs("deploy_salt")
+ def String junit_report_xml = readFile(xml_report_name)
+ def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+ throw new Exception(junit_report_xml_pretty)
+ } else {
+ throw e
+ }
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for salt cluster
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
}
}
diff --git a/jobs/pipelines/swarm-create-cfg-config-drive.groovy b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
new file mode 100644
index 0000000..72d278f
--- /dev/null
+++ b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
@@ -0,0 +1,257 @@
+import java.text.SimpleDateFormat
+
+def gerrit = new com.mirantis.mk.Gerrit()
+def dateFormat = new SimpleDateFormat("yyyyMMddHHmm")
+def date = new Date()
+def common_scripts_commit = "${COMMON_SCRIPTS_COMMIT}"
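+// Fall back to a generated ISO name when CONFIG_DRIVE_ISO_NAME is empty,
+// e.g. cfg01.<cluster>-config-201808151230.iso (timestamp is illustrative)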
+def iso_name = "${CONFIG_DRIVE_ISO_NAME}" ?: "cfg01.${CLUSTER_NAME}-config-${dateFormat.format(date)}.iso"
+def node_name = "${NODE_NAME}"
+
+def smc = [:]
+smc['SALT_MASTER_MINION_ID'] = "cfg01.${CLUSTER_NAME}.local"
+smc['SALT_MASTER_DEPLOY_IP'] = "${SALT_MASTER_DEPLOY_IP}"
+smc['DEPLOY_NETWORK_GW'] = "${DEPLOY_NETWORK_GW}"
+smc['DEPLOY_NETWORK_NETMASK'] = "${DEPLOY_NETWORK_NETMASK}"
+smc['DNS_SERVERS'] = "${DNS_SERVERS}"
+smc['PIPELINES_FROM_ISO'] = '${PIPELINES_FROM_ISO}'
+smc['PIPELINE_REPO_URL'] = '${PIPELINE_REPO_URL}'
+smc['MCP_VERSION'] = "${MCP_VERSION}"
+// smc['LOCAL_REPOS'] = 'true'
+smc['MCP_SALT_REPO_KEY'] = "${MCP_SALT_REPO_KEY}"
+smc['MCP_SALT_REPO_URL'] = "${MCP_SALT_REPO_URL}"
+
+def entries(m) {
+ m.collect {k, v -> [k, v]}
+}
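+// Note: iterating a Map directly in CPS-transformed pipeline code can fail
+// with NotSerializableException on java.util.Map$Entry, so entries() flattens
+// the map into a serializable list of [key, value] pairs, consumed below as:
+//   for (i in entries(smc)) { ... }  // i[0] is the key, i[1] the value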
+
+node (node_name) {
+
+ timestamps(){
+
+ stage("Clean Workspace") {
+ step([$class: 'WsCleanup'])
+ }
+
+ stage("Get mk-pipelines, pipeline-library and mcp-common-scripts repositories") {
+ def cloned = true
+ withCredentials([[$class: 'SSHUserPrivateKeyBinding',
+ keyFileVariable: "GERRIT_KEY",
+ credentialsId: env.GERRIT_MCP_CREDENTIALS_ID,
+ usernameVariable: "GERRIT_USERNAME",
+ passwordVariable: "GERRIT_PASSWORD"]]) {
+
+ dir("mcp-common-scripts-git") {
+ cloned = gerrit.gerritPatchsetCheckout([
+ credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+ gerritBranch: "${MCP_VERSION}",
+ gerritRefSpec: "${MCP_COMMON_SCRIPTS_REFS}",
+ gerritScheme: "ssh",
+ gerritName: "${GERRIT_USERNAME}",
+ gerritHost: "gerrit.mcp.mirantis.net",
+ gerritPort: "29418",
+ gerritProject: "mcp/mcp-common-scripts"
+ ])
+ }
+ if (!cloned) {
+ error("Failed to clone the repository mcp/mcp-common-scripts")
+ }
+
+ sh ("""\
+ set -ex
+ eval \$(ssh-agent)
+ ssh-add ${GERRIT_KEY}
+ git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mk/mk-pipelines mk-pipelines
+ git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mcp-ci/pipeline-library pipeline-library
+ """)
+
+ if (PIPELINE_LIBRARY_REF != '') {
+ sh ("""\
+ set -ex
+ eval \$(ssh-agent)
+ ssh-add ${GERRIT_KEY}
+ cd pipeline-library
+ git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF}
+ git tag ${MCP_VERSION} FETCH_HEAD -f
+ """)
+ }
+ if (MK_PIPELINES_REF != '') {
+ sh ("""\
+ set -ex
+ eval \$(ssh-agent)
+ ssh-add ${GERRIT_KEY}
+ cd mk-pipelines
+ git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF}
+ git tag ${MCP_VERSION} FETCH_HEAD -f
+ """)
+ }
+
+ // dir("mk-pipelines-git") {
+ // cloned = gerrit.gerritPatchsetCheckout([
+ // credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+ // gerritRefSpec: "${MK_PIPELINES_REF}",
+ // gerritScheme: "ssh",
+ // gerritName: "${GERRIT_USERNAME}",
+ // gerritHost: "gerrit.mcp.mirantis.net",
+ // gerritPort: "29418",
+ // gerritProject: "mk/mk-pipelines"
+ // ])
+ // }
+ // if (!cloned) {
+ // error("Failed to clone the repository mk/mk-pipelines")
+ // }
+
+ // dir("pipeline-library-git") {
+ // cloned = gerrit.gerritPatchsetCheckout([
+ // credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+ // gerritRefSpec: "${PIPELINE_LIBRARY_REF}",
+ // gerritScheme: "ssh",
+ // gerritName: "${GERRIT_USERNAME}",
+ // gerritHost: "gerrit.mcp.mirantis.net",
+ // gerritPort: "29418",
+ // gerritProject: "mcp-ci/pipeline-library"
+ // ])
+ // }
+ // if (!cloned) {
+ // error("Failed to clone the repository mcp-ci/pipeline-library")
+ // }
+ }
+ //if (PIPELINE_LIBRARY_REF != '') {
+ // sh "cd pipeline-library; git tag ${MCP_VERSION} FETCH_HEAD -f ; cd .."
+ //}
+ //if (MK_PIPELINES_REF != '') {
+ // sh "cd mk-pipelines; git tag ${MCP_VERSION} FETCH_HEAD -f; cd .."
+ //}
+
+        // gerrit.gerritPatchsetCheckout() doesn't support cloning a bare repository
+ // sh "git clone --mirror mk-pipelines-git mk-pipelines"
+ // sh "git clone --mirror pipeline-library-git pipeline-library"
+ }
+
+ stage("Prepare arguments for generation config drive") {
+
+ config_drive_script_path = "mcp-common-scripts-git/config-drive/create_config_drive.sh"
+ user_data_script_path = "mcp-common-scripts-git/config-drive/master_config.yaml"
+ sh "chmod +x ${config_drive_script_path}"
+
+ //args = "--user-data user_data --vendor-data user_data2 --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
+ args = "--user-data user_data2 --vendor-data ${user_data_script_path} --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
+ }
+
+ stage("Get cluster model") {
+ def model_url = "${MODEL_URL}"
+ sh "rm -rf model"
+ if (MODEL_URL_OBJECT_TYPE == 'tar.gz') {
+ sh "wget -O model.tar.gz '${model_url}'"
+ sh "mkdir model && cd model && tar zxfv ../model.tar.gz"
+ } else {
+ sh "git clone --recursive $model_url -b ${MCP_VERSION} model"
+ // remove .git file with hardcoded path
+ sh "rm model/classes/system/.git"
+ }
+ }
+
+ stage("Set data"){
+ for (i in entries(smc)) {
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" ${user_data_script_path}"
+ }
+ }
+
+ stage("Create user_data2"){
+ //http://jen20.com/2015/10/04/cloudconfig-merging.html
+ //TODO(ddmitriev): allow to read such file from
+ // ./tcp_tests/templates/${LAB_CONFIG_NAME}/ directory for each lab
+ def user_data2 = """\
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+#write_files: # write_files doesn't work as expected here: it overwrites this key from the mcp-common-scripts YAML, losing data
+# - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+# content: |
+# GRUB_RECORDFAIL_TIMEOUT=30
+# GRUB_TIMEOUT=10
+# GRUB_TIMEOUT_STYLE=menu
+#
+# - path: /root/interfaces
+# content: |
+# auto lo
+# iface lo inet loopback
+#
+# auto ens3
+# iface ens3 inet dhcp
+#
+# - path: /root/.ssh/config
+# owner: root:root
+# permissions: '0600'
+# content: |
+# Host *
+# ServerAliveInterval 60
+# ServerAliveCountMax 0
+# StrictHostKeyChecking no
+# UserKnownHostsFile /dev/null
+#
+# - path: /etc/cloud/master_environment_override
+# owner: root:root
+# permissions: '0600'
+# content: |
+# export SALT_MASTER_MINION_ID="cfg01.${CLUSTER_NAME}.local"
+# export SALT_MASTER_DEPLOY_IP="${SALT_MASTER_DEPLOY_IP}"
+# export DEPLOY_NETWORK_GW="${DEPLOY_NETWORK_GW}"
+# export DEPLOY_NETWORK_NETMASK="${DEPLOY_NETWORK_NETMASK}"
+# export DNS_SERVERS="${DNS_SERVERS}"
+# export PIPELINES_FROM_ISO="${PIPELINES_FROM_ISO}"
+# export PIPELINE_REPO_URL="${PIPELINE_REPO_URL}"
+# export MCP_VERSION="${MCP_VERSION}"
+# export LOCAL_REPOS="true"
+# export MCP_SALT_REPO_KEY="${MCP_SALT_REPO_KEY}"
+# export MCP_SALT_REPO_URL="${MCP_SALT_REPO_URL}"
+
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo touch /is_cloud_init_started
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+merge_how: "dict(recurse_array)+list(append)"
+"""
+ writeFile(file: "user_data2", text: user_data2, encoding: "UTF-8")
+ }
+
+ stage("Create config-drive"){
+ // create cfg config-drive
+ // apt package genisoimage is required for this stage
+ sh "./${config_drive_script_path} ${args}"
+ }
+
+ stage("Save artifacts") {
+ archiveArtifacts allowEmptyArchive: false,
+ artifacts: "${iso_name}"
+ }
+
+ stage("Download config drive to slave") {
+ if (DOWNLOAD_CONFIG_DRIVE == 'true') {
+ def b_res = build job: 'download-config-drive',
+ parameters: [
+ string(name: 'IMAGE_URL', value: "${BUILD_URL}/artifact/${iso_name}"),
+ string(name: 'NODE_NAME', value: "${NODE_NAME}")
+ ]
+ } else {
+ echo "Drive only generated. But didn't download"
+ }
+ }
+ }
+}
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index d067e07..58474b9 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -7,7 +7,8 @@
* PARENT_NODE_NAME Name of the jenkins slave to create the environment
* PARENT_WORKSPACE Path to the workspace of the parent job to use tcp-qa repo
* ENV_NAME Fuel-devops environment name
- * STACK_INSTALL Stacks to install using Jenkins on cfg01 node: "core:1800,cicd:1800", where 1800 is timeout
+ * STACK_INSTALL Stacks to install using Jenkins on cfg01 node: "core,cicd"
+ * STACK_INSTALL_TIMEOUT    Stacks installation timeout, in seconds
* TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
* SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
*
@@ -24,53 +25,56 @@
currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
-node ("${PARENT_NODE_NAME}") {
- if (! fileExists("${PARENT_WORKSPACE}")) {
- error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
- }
- dir("${PARENT_WORKSPACE}") {
- try {
+def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
+
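+// Guard the whole job: the wrapper timeout is the stack installation timeout
+// plus a fixed 600-second margin, presumably for the non-install stages,
+// e.g. STACK_INSTALL_TIMEOUT=1800 yields a 2400 s (40 min) limit for the job.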
+timeout(time: install_timeout + 600, unit: 'SECONDS') {
+
+ node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
if (! env.STACK_INSTALL) {
error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
}
- // Install core and cicd
- def stack
- def timeout
-
- for (element in "${env.STACK_INSTALL}".split(",")) {
- if (element.contains(':')) {
- (stack, timeout) = element.split(':')
- } else {
- stack = element
- timeout = '1800'
- }
- stage("Run Jenkins job on salt-master [deploy_openstack:${stack}]") {
- shared.run_job_on_day01_node(stack, timeout)
- }
-
- stage("Sanity check the deployed component [${stack}]") {
- shared.sanity_check_component(stack)
- }
-
- stage("Make environment snapshot [${stack}_deployed]") {
- shared.devops_snapshot(stack)
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
}
}
- } catch (e) {
- common.printMsg("Job is failed: " + e.message, "red")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- // TODO(ddmitriev): add checks for cicd cluster
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ try {
+ // Install core and cicd
+ stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
+ shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
+ }
+
+ for (stack in "${env.STACK_INSTALL}".split(",")) {
+ stage("Sanity check the deployed component [${stack}]") {
+ shared.sanity_check_component(stack)
+ }
+ stage("Make environment snapshot [${stack}_deployed]") {
+ shared.devops_snapshot(stack)
+ }
+ } // for
+
+ } catch (e) {
+ common.printMsg("Job is failed", "purple")
+ shared.download_logs("deploy_drivetrain")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for cicd cluster
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
- }
- }
-}
+
+ } // dir
+ } // node
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 54bc43d..c854c73 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -7,7 +7,8 @@
* PARENT_NODE_NAME Name of the jenkins slave to create the environment
* PARENT_WORKSPACE Path to the workspace of the parent job to use tcp-qa repo
* ENV_NAME Fuel-devops environment name
- * STACK_INSTALL Stacks to install using Jenkins on CICD cluster: "openstack:3200,stacklight:2400", where 3200 and 2400 are timeouts
+ * STACK_INSTALL Stacks to install using Jenkins on CICD cluster: "openstack,stacklight"
+ * STACK_INSTALL_TIMEOUT    Stacks installation timeout, in seconds
* TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
* SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
*
@@ -24,53 +25,56 @@
currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
-node ("${PARENT_NODE_NAME}") {
- if (! fileExists("${PARENT_WORKSPACE}")) {
- error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
- }
- dir("${PARENT_WORKSPACE}") {
- try {
+def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
+
+timeout(time: install_timeout + 600, unit: 'SECONDS') {
+
+ node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
if (! env.STACK_INSTALL) {
error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
}
- // Install the cluster
- def stack
- def timeout
-
- for (element in "${STACK_INSTALL}".split(",")) {
- if (element.contains(':')) {
- (stack, timeout) = element.split(':')
- } else {
- stack = element
- timeout = '1800'
- }
- stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
- shared.run_job_on_cicd_nodes(stack, timeout)
- }
-
- stage("Sanity check the deployed component [${stack}]") {
- shared.sanity_check_component(stack)
- }
-
- stage("Make environment snapshot [${stack}_deployed]") {
- shared.devops_snapshot(stack)
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
}
}
- } catch (e) {
- common.printMsg("Job is failed:" + e.message, "red")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- // TODO(ddmitriev): add checks for the installed stacks
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ try {
+ // Install the cluster
+ stage("Run Jenkins job on CICD [deploy_openstack:${env.STACK_INSTALL}]") {
+ shared.run_job_on_cicd_nodes(env.STACK_INSTALL, install_timeout)
+ }
+
+ for (stack in "${env.STACK_INSTALL}".split(",")) {
+ stage("Sanity check the deployed component [${stack}]") {
+ shared.sanity_check_component(stack)
+ }
+ stage("Make environment snapshot [${stack}_deployed]") {
+ shared.devops_snapshot(stack)
+ }
+ } // for
+
+ } catch (e) {
+ common.printMsg("Job is failed", "purple")
+ shared.download_logs("deploy_platform")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for the installed stacks
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
- }
- }
-}
+
+ } // dir
+ } // node
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 553b8a2..780229d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -16,6 +16,7 @@
* REPOSITORY_SUITE Not used (backward compatibility, for manual deployment steps only)
* MCP_IMAGE_PATH1604 Not used (backward compatibility, for manual deployment steps only)
* IMAGE_PATH_CFG01_DAY01 Not used (backward compatibility, for manual deployment steps only)
+ * TEMPEST_IMAGE_VERSION Tempest image version: pike by default, can be queens.
*/
@Library('tcp-qa')_
@@ -36,9 +37,16 @@
dir("${PARENT_WORKSPACE}") {
try {
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
+ }
+
stage("Run tests") {
def steps = shared.get_steps_list(PASSED_STEPS)
def sources = """\
+ cd ${PARENT_WORKSPACE}
export ENV_NAME=${ENV_NAME}
. ./tcp_tests/utils/env_salt"""
if (steps.contains('k8s')) {
@@ -47,12 +55,13 @@
}
if (steps.contains('openstack')) {
sources += """
+ export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
# TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
}
def installed = steps.collect {"""\
export ${it}_installed=true"""}.join("\n")
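+                // e.g. PASSED_STEPS="hardware,salt,openstack" produces
+                // 'export openstack_installed=true' etc., presumably so the
+                // fixtures treat those components as already deployed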
- shared.run_cmd(sources + installed + """
+ shared.run_sh(sources + installed + """
export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
@@ -62,13 +71,27 @@
py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} test_completed
""")
+
+ def snapshot_name = "test_completed"
+ shared.download_logs("test_completed")
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} ${snapshot_name}
+ """)
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME}
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
} catch (e) {
- common.printMsg("Job is failed" + e.message, "red")
+ common.printMsg("Job is failed", "purple")
+ // Downloading logs usually not needed here
+ // because tests should use the decorator @pytest.mark.grab_versions
+ // shared.download_logs("test_failed")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 333547a..3849e16 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -10,6 +10,7 @@
where 3200 and 2400 might be timeouts (not used in the testing pipeline)
* PARENT_NODE_NAME Name of the jenkins slave to create the environment
* PARENT_WORKSPACE Path to the workspace of the parent job to use tcp-qa repo
+ * TEMPEST_TEST_SUITE_NAME  Name of the Tempest test suite for TestRail reporting
* TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
*/
@@ -30,32 +31,46 @@
error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
dir("${PARENT_WORKSPACE}") {
+ def description = ''
try {
+
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
+ }
+
def report_name = ''
def testSuiteName = ''
def methodname = ''
def testrail_name_template = ''
def reporter_extra_options = []
- stage("Archive all xml reports") {
- archiveArtifacts artifacts: "**/*.xml"
- }
+ def report_result = ''
+ def report_url = ''
+ // deployment_report_name = "deployment_${ENV_NAME}.xml"
def deployment_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"deployment_${ENV_NAME}.xml\"", returnStdout: true)
+ // tcpqa_report_name =~ "nosetests.xml"
def tcpqa_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"nosetests.xml\"", returnStdout: true)
+ // tempest_report_name =~ "report_*.xml"
def tempest_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"report_*.xml\"", returnStdout: true)
+ // k8s_conformance_report_name =~ conformance_result.xml
def k8s_conformance_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_result.xml\"", returnStdout: true)
- def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"stacklight_report.xml\"", returnStdout: true)
+ // k8s_conformance_report_name =~ conformance_virtlet_result.xml
+ def k8s_conformance_virtlet_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_virtlet_result.xml\"", returnStdout: true)
+ // stacklight_report_name =~ "stacklight_report.xml" or "report.xml"
+ def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"*report.xml\"", returnStdout: true)
common.printMsg(deployment_report_name ? "Found deployment report: ${deployment_report_name}" : "Deployment report not found", deployment_report_name ? "blue" : "red")
common.printMsg(tcpqa_report_name ? "Found tcp-qa report: ${tcpqa_report_name}" : "tcp-qa report not found", tcpqa_report_name ? "blue" : "red")
common.printMsg(tempest_report_name ? "Found tempest report: ${tempest_report_name}" : "tempest report not found", tempest_report_name ? "blue" : "red")
common.printMsg(k8s_conformance_report_name ? "Found k8s conformance report: ${k8s_conformance_report_name}" : "k8s conformance report not found", k8s_conformance_report_name ? "blue" : "red")
+ common.printMsg(k8s_conformance_virtlet_report_name ? "Found k8s conformance virtlet report: ${k8s_conformance_virtlet_report_name}" : "k8s conformance virtlet report not found", k8s_conformance_virtlet_report_name ? "blue" : "red")
common.printMsg(stacklight_report_name ? "Found stacklight-pytest report: ${stacklight_report_name}" : "stacklight-pytest report not found", stacklight_report_name ? "blue" : "red")
if (deployment_report_name) {
stage("Deployment report") {
-// report_name = "deployment_${ENV_NAME}.xml"
testSuiteName = "[MCP] Integration automation"
methodname = '{methodname}'
testrail_name_template = '{title}'
@@ -64,13 +79,19 @@
"--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
"--testrail-case-section-name \'All\'",
]
- shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ report_result = shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
}
}
if (tcpqa_report_name) {
stage("tcp-qa cases report") {
-// report_name = "nosetests.xml"
testSuiteName = "[MCP_X] integration cases"
methodname = "{methodname}"
testrail_name_template = "{title}"
@@ -79,42 +100,103 @@
"--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
"--testrail-case-section-name \'All\'",
]
- shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ report_result = shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
}
}
if ('openstack' in stacks && tempest_report_name) {
stage("Tempest report") {
-// report_name = "report_*.xml"
- testSuiteName = "[MCP1.1_PIKE]Tempest"
+ testSuiteName = env.TEMPEST_TEST_SUITE_NAME
methodname = "{classname}.{methodname}"
testrail_name_template = "{title}"
- shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
+ report_result = shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
}
}
if ('k8s' in stacks && k8s_conformance_report_name) {
stage("K8s conformance report") {
- println "TBD"
- // K8s conformance report
+ def k8s_version=shared.run_cmd_stdout("""\
+ . ./env_k8s_version;
+ echo "\$KUBE_SERVER_VERSION"
+ """).trim().split().last()
+ testSuiteName = "[MCP][k8s]Hyperkube ${k8s_version}.x"
+ methodname = "{methodname}"
+ testrail_name_template = "{title}"
+ reporter_extra_options = [
+ "--send-duplicates",
+ "--testrail-add-missing-cases",
+ "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+ "--testrail-case-section-name \'Conformance\'",
+ ]
+ report_result = shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
+ }
+ }
+
+ if ('k8s' in stacks && k8s_conformance_virtlet_report_name) {
+ stage("K8s conformance virtlet report") {
+ testSuiteName = "[k8s] Virtlet"
+ methodname = "{methodname}"
+ testrail_name_template = "{title}"
+ reporter_extra_options = [
+ "--send-duplicates",
+ "--testrail-add-missing-cases",
+ "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+ "--testrail-case-section-name \'Conformance\'",
+ ]
+ report_result = shared.upload_results_to_testrail(k8s_conformance_virtlet_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
}
}
if ('stacklight' in stacks && stacklight_report_name) {
stage("stacklight-pytest report") {
-// report_name = "stacklight_report.xml"
testSuiteName = "LMA2.0_Automated"
methodname = "{methodname}"
testrail_name_template = "{title}"
- shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+ report_result = shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+ common.printMsg(report_result, "blue")
+ report_url = report_result.split("\n").each {
+ if (it.contains("[TestRun URL]")) {
+ common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+ description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+ }
+ }
}
}
} catch (e) {
- common.printMsg("Job is failed: " + e.message, "red")
+ common.printMsg("Job is failed", "purple")
throw e
} finally {
// reporting is failed for some reason
+ writeFile(file: "description.txt", text: description, encoding: "UTF-8")
}
}
}
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index e4779cd..92b43b2 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -2,6 +2,19 @@
import groovy.xml.XmlUtil
+def run_sh(String cmd) {
+ // run shell script without catching any output
+ def common = new com.mirantis.mk.Common()
+ common.printMsg("Run shell command:\n" + cmd, "blue")
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+ script = """\
+ set -ex;
+ . ${VENV_PATH}/bin/activate;
+ bash -c '${cmd.stripIndent()}'
+ """
+ return sh(script: script)
+}
+
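+// Usage sketch: unlike run_cmd() below, run_sh() streams the command output
+// straight to the build console instead of capturing it, e.g. for long
+// py.test runs:
+//   run_sh("export ENV_NAME=test-env; py.test -k test_bootstrap_salt")
+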
def run_cmd(String cmd, Boolean returnStdout=false) {
def common = new com.mirantis.mk.Common()
common.printMsg("Run shell command:\n" + cmd, "blue")
@@ -42,7 +55,7 @@
if (job_info.getResult() != "SUCCESS") {
currentBuild.result = job_info.getResult()
def build_number = job_info.getNumber()
- common.printMsg("Job '${job_name}' failed, getting details", "red")
+ common.printMsg("Job '${job_name}' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
export JOB_NAME=${job_name}
export BUILD_NUMBER=${build_number}
@@ -63,14 +76,27 @@
parameters: parameters,
propagate: false
+ def build_number = job_info.getNumber()
+ def build_url = job_info.getAbsoluteUrl()
+ def build_status = job_info.getResult()
+ try {
+        // Try to grab '*.tar.gz' artifacts from the downstream job
+ step($class: 'hudson.plugins.copyartifact.CopyArtifact',
+ projectName: job_name,
+ selector: specific("${build_number}"),
+ filter: "**/*.tar.gz",
+ target: '.',
+ flatten: true,
+ fingerprintArtifacts: true)
+ } catch (none) {
+ common.printMsg("No *.tar.gz files found in artifacts of the build ${build_url}", "purple")
+ }
+
if (job_info.getResult() != "SUCCESS") {
- def build_status = job_info.getResult()
- def build_number = job_info.getNumber()
- def build_url = job_info.getAbsoluteUrl()
def job_url = "${build_url}"
currentBuild.result = build_status
if (junit_report_filename) {
- common.printMsg("Job '${job_url}' failed with status ${build_status}, getting details", "red")
+ common.printMsg("Job '${job_url}' failed with status ${build_status}, getting details", "purple")
step($class: 'hudson.plugins.copyartifact.CopyArtifact',
projectName: job_name,
selector: specific("${build_number}"),
@@ -81,10 +107,8 @@
def String junit_report_xml = readFile("${junit_report_filename}")
def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
- // Replace '<' and '>' to '<' and '>' to avoid conflicts between xml tags in the message and JUnit report
- def String junit_report_xml_filtered = junit_report_xml_pretty.replaceAll("<","<").replaceAll(">", ">")
def String msg = "Job '${job_url}' failed with status ${build_status}, JUnit report:\n"
- throw new Exception(msg + junit_report_xml_filtered)
+ throw new Exception(msg + junit_report_xml_pretty)
} else {
throw new Exception("Job '${job_url}' failed with status ${build_status}, please check the console output.")
}
@@ -115,6 +139,17 @@
""")
}
+def update_working_dir() {
+ // Use to fetch a patchset from gerrit to the working dir
+ run_cmd("""\
+ if [ -n "$TCP_QA_REFS" ]; then
+ set -e
+ git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+ fi
+ pip install -r tcp_tests/requirements.txt
+ """)
+}
+
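+// Typical usage in the swarm-* pipelines (see the pipeline files above):
+//   if (env.TCP_QA_REFS) {
+//       stage("Update working dir to patch ${TCP_QA_REFS}") {
+//           shared.update_working_dir()
+//       }
+//   }
+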
def swarm_bootstrap_salt_cluster_devops() {
def common = new com.mirantis.mk.Common()
def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
@@ -146,7 +181,7 @@
build_pipeline_job('swarm-bootstrap-salt-cluster-devops', parameters)
}
-def swarm_deploy_cicd(String stack_to_install='core,cicd') {
+def swarm_deploy_cicd(String stack_to_install, String install_timeout) {
// Run openstack_deploy job on cfg01 Jenkins for specified stacks
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -155,13 +190,14 @@
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'STACK_INSTALL', value: stack_to_install),
+ string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
]
build_pipeline_job('swarm-deploy-cicd', parameters)
}
-def swarm_deploy_platform(String stack_to_install) {
+def swarm_deploy_platform(String stack_to_install, String install_timeout) {
// Run openstack_deploy job on CICD Jenkins for specified stacks
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -170,6 +206,7 @@
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'STACK_INSTALL', value: stack_to_install),
+ string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
]
@@ -180,6 +217,7 @@
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def tempest_image_version = env.TEMPEST_IMAGE_VERSION ?: 'pike'
def parameters = [
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'PASSED_STEPS', value: passed_steps),
@@ -192,6 +230,8 @@
string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+ string(name: 'TEMPEST_IMAGE_VERSION', value: "${tempest_image_version}"),
+
]
common.printMsg("Start building job 'swarm-run-pytest' with parameters:", "purple")
common.prettyPrint(parameters)
@@ -203,6 +243,7 @@
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def tempest_test_suite_name = env.TEMPEST_TEST_SUITE_NAME
def parameters = [
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
@@ -210,6 +251,7 @@
string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+ string(name: 'TEMPEST_TEST_SUITE_NAME', value: "${tempest_test_suite_name}"),
]
common.printMsg("Start building job 'swarm-testrail-report' with parameters:", "purple")
common.prettyPrint(parameters)
@@ -263,8 +305,18 @@
""").trim().split().last()
println("SALT_MASTER_IP=" + SALT_MASTER_IP)
+ def dhcp_ranges_json=run_cmd_stdout("""\
+ fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
+ fgrep "admin-pool01"|
+ cut -d"=" -f2
+ """).trim().split("\n").last()
+ def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
+ def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
+ println("ADMIN_NETWORK_GW=" + ADMIN_NETWORK_GW)
+
def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
+ def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def parameters = [
string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
@@ -276,19 +328,23 @@
string(name: 'NODE_NAME', value: "${NODE_NAME}"),
string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
+ string(name: 'DEPLOY_NETWORK_GW', value: "${ADMIN_NETWORK_GW}"),
string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+ string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
]
- build_pipeline_job('create-cfg-config-drive', parameters)
+ build_pipeline_job('swarm-create-cfg-config-drive', parameters)
}
-def run_job_on_day01_node(stack_to_install, timeout=1800) {
+def run_job_on_day01_node(stack_to_install, timeout=2400) {
// stack_to_install="core,cicd"
+ def common = new com.mirantis.mk.Common()
def stack = "${stack_to_install}"
+ common.printMsg("Deploy DriveTrain CICD components: ${stack_to_install}", "blue")
try {
run_cmd("""\
export ENV_NAME=${ENV_NAME}
@@ -299,12 +355,13 @@
\\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
\\\"STACK_INSTALL\\\": \\\"${stack}\\\"
}\"
- JOB_PREFIX="[ ${ENV_NAME}/{build_number}:${stack} {time} ] "
+ JOB_PREFIX="[ ${ENV_NAME}/{build_number}:drivetrain {time} ] "
python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
""")
+        // Wait for IO to calm down on cluster nodes
+ sleep(60)
} catch (e) {
- def common = new com.mirantis.mk.Common()
- common.printMsg("Product job 'deploy_openstack' failed, getting details", "red")
+ common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
. ./tcp_tests/utils/env_salt
. ./tcp_tests/utils/env_jenkins_day01
@@ -316,9 +373,11 @@
}
}
-def run_job_on_cicd_nodes(stack_to_install, timeout=1800) {
+def run_job_on_cicd_nodes(stack_to_install, timeout=2400) {
// stack_to_install="k8s,calico,stacklight"
+ def common = new com.mirantis.mk.Common()
def stack = "${stack_to_install}"
+ common.printMsg("Deploy Platform components: ${stack_to_install}", "blue")
try {
run_cmd("""\
export ENV_NAME=${ENV_NAME}
@@ -329,13 +388,13 @@
\\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
\\\"STACK_INSTALL\\\": \\\"${stack}\\\"
}\"
- JOB_PREFIX="[ ${ENV_NAME}/{build_number}:${stack} {time} ] "
+ JOB_PREFIX="[ ${ENV_NAME}/{build_number}:platform {time} ] "
python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
- sleep 60 # Wait for IO calm down on cluster nodes
""")
+        // Wait for IO to calm down on cluster nodes
+ sleep(60)
} catch (e) {
- def common = new com.mirantis.mk.Common()
- common.printMsg("Product job 'deploy_openstack' failed, getting details", "red")
+ common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
. ./tcp_tests/utils/env_salt
. ./tcp_tests/utils/env_jenkins_cicd
@@ -362,6 +421,42 @@
}
}
+def download_logs(archive_name_prefix) {
+ // Archive and download logs and debug info from salt nodes in the lab
+ // Do not fail in case of error to not lose the original error from the parent exception.
+ def common = new com.mirantis.mk.Common()
+ common.printMsg("Downloading nodes logs by ${archive_name_prefix}", "blue")
+ run_cmd("""\
+ export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+ ./tcp_tests/utils/get_logs.py --archive-name-prefix ${archive_name_prefix} || true
+ """)
+}
+
+def devops_snapshot_info(snapshot_name) {
+ // Print helper message after snapshot
+ def common = new com.mirantis.mk.Common()
+
+ def SALT_MASTER_IP=run_cmd_stdout("""\
+ . ./tcp_tests/utils/env_salt
+ echo \$SALT_MASTER_IP
+ """).trim().split().last()
+ def login = "root" // set fixed 'root' login for now
+ def password = "r00tme" // set fixed 'root' login for now
+ def key_file = "${env.WORKSPACE}/id_rsa" // set fixed path in the WORKSPACE
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+
+ common.printMsg("""\
+#########################
+# To revert the snapshot:
+#########################
+. ${VENV_PATH}/bin/activate;
+dos.py revert ${ENV_NAME} ${snapshot_name};
+dos.py resume ${ENV_NAME};
+# dos.py time-sync ${ENV_NAME}; # Optional\n
+ssh -i ${key_file} ${login}@${SALT_MASTER_IP} # Optional password: ${password}
+""", "cyan")
+}
+
def devops_snapshot(stack) {
// Make the snapshot with name "${stack}_deployed"
// for all VMs in the environment.
@@ -382,6 +477,7 @@
cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
fi
""")
+ devops_snapshot_info("${stack}_deployed")
}
def get_steps_list(steps) {
@@ -393,11 +489,15 @@
// <filename> is name of the XML report file that will be created
// <status> is one of the 'success', 'skipped', 'failure' or 'error'
// 'error' status is assumed as 'Blocker' in TestRail reporter
+
+ // Replace '<' and '>' to '<' and '>' to avoid conflicts between xml tags in the message and JUnit report
+ def String text_filtered = text.replaceAll("<","<").replaceAll(">", ">")
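+    // e.g. a failure message containing '<Exception>' is embedded as
+    // '&lt;Exception&gt;' so the generated report below stays well-formed XML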
+
def script = """\
<?xml version=\"1.0\" encoding=\"utf-8\"?>
<testsuite>
<testcase classname=\"${classname}\" name=\"${name}\" time=\"0\">
- <${status} message=\"${status_message}\">${text}</${status}>
+ <${status} message=\"${status_message}\">${text_filtered}</${status}>
<system-out>${stdout}</system-out>
<system-err>${stderr}</system-err>
</testcase>
@@ -413,6 +513,7 @@
def testrailProject = "Mirantis Cloud Platform"
def testPlanName = "[MCP-Q2]System-${MCP_VERSION}-${new Date().format('yyyy-MM-dd')}"
def testrailMilestone = "MCP1.1"
+    def testrailCaseMaxNameLength = 250
def jobURL = env.BUILD_URL
def reporterOptions = [
@@ -429,6 +530,7 @@
"--xunit-name-template \"${methodname}\"",
"--testrail-name-template \"${testrail_name_template}\"",
"--test-results-link \"${jobURL}\"",
+ "--testrail-case-max-name-lenght ${testrailCaseMaxNameLenght}",
] + reporter_extra_options
def script = """
@@ -445,7 +547,7 @@
passwordVariable: 'TESTRAIL_PASSWORD',
usernameVariable: 'TESTRAIL_USER']
]) {
- return run_cmd(script)
+ return run_cmd_stdout(script)
}
}
diff --git a/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml b/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml
index 0fe1e7e..67fb53c 100644
--- a/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml
+++ b/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml
@@ -22,7 +22,7 @@
proto: manual
slaves: enp2s0f1
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- enp2s0f1
require_interfaces:
- enp2s0f1
@@ -30,7 +30,7 @@
enabled: true
proto: manual
type: vlan
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0
require_interfaces:
- bond0
@@ -40,7 +40,7 @@
netmask: 255.255.255.0
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0.2416
require_interfaces:
- bond0.2416
@@ -54,7 +54,7 @@
netmask: 255.255.255.192
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- enp2s0f0
require_interfaces:
- enp2s0f0
@@ -89,7 +89,7 @@
proto: manual
slaves: enp2s0f1
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- enp2s0f1
require_interfaces:
- enp2s0f1
@@ -97,7 +97,7 @@
enabled: true
proto: manual
type: vlan
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0
require_interfaces:
- bond0
@@ -107,7 +107,7 @@
netmask: 255.255.255.0
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0.2416
require_interfaces:
- bond0.2416
@@ -121,7 +121,7 @@
netmask: 255.255.255.192
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- enp2s0f0
require_interfaces:
- enp2s0f0
@@ -156,7 +156,7 @@
proto: manual
slaves: eno2
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- eno2
require_interfaces:
- eno2
@@ -164,7 +164,7 @@
enabled: true
proto: manual
type: vlan
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0
require_interfaces:
- bond0
@@ -174,7 +174,7 @@
netmask: 255.255.255.0
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0.2416
require_interfaces:
- bond0.2416
@@ -188,7 +188,7 @@
netmask: 255.255.255.192
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- eno1
require_interfaces:
- eno1
@@ -263,7 +263,7 @@
proto: manual
slaves: enp3s0f1 enp5s0f0
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- enp3s0f1
- enp5s0f0
require_interfaces:
@@ -273,7 +273,7 @@
enabled: true
proto: manual
type: vlan
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0
require_interfaces:
- bond0
@@ -283,7 +283,7 @@
netmask: 255.255.255.0
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0.2416
require_interfaces:
- bond0.2416
@@ -321,7 +321,7 @@
# slaves: eth0 eth3
slaves: eth0
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- eth0
# - eth3
require_interfaces:
@@ -331,7 +331,7 @@
enabled: true
proto: manual
type: vlan
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0
require_interfaces:
- bond0
@@ -346,7 +346,7 @@
netmask: 255.255.255.0
proto: static
type: bridge
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- bond0.2416
require_interfaces:
- bond0.2416
@@ -397,7 +397,7 @@
proto: manual
slaves: enp3s0f1
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- enp3s0f1
require_interfaces:
- enp3s0f1
@@ -483,7 +483,7 @@
proto: manual
slaves: eno2
type: bond
- use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+ use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
- eno2
require_interfaces:
- eno2
diff --git a/tcp_tests/fixtures/day1_fixtures.py b/tcp_tests/fixtures/day1_fixtures.py
index ff3a0b5..e223a2b 100644
--- a/tcp_tests/fixtures/day1_fixtures.py
+++ b/tcp_tests/fixtures/day1_fixtures.py
@@ -71,10 +71,6 @@
"region": {
"machines": macs}}}}
- if not config.day1_underlay.lvm:
- underlay.enable_lvm(hardware.lvm_storages())
- config.day1_underlay.lvm = underlay.config_lvm
-
hardware.create_snapshot(ext.SNAPSHOT.day1_underlay)
else:
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index e581b86..409034e 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -77,16 +77,16 @@
interfaces_pillar = k8s_actions._salt.get_pillar(
tgt=tgt, pillar='linux:network:interface')[0]
- for node_name, interfaces in interfaces_pillar.items():
+ for minion_id, interfaces in interfaces_pillar.items():
for iface_name, iface in interfaces.items():
iface_name = iface.get('name', iface_name)
default_proto = 'static' if 'address' in iface else 'dhcp'
if iface.get('proto', default_proto) != 'dhcp':
LOG.warning('Trying to kill dhclient for iface {0} '
- 'on node {1}'.format(iface_name, node_name))
+ 'on node {1}'.format(iface_name, minion_id))
underlay.check_call(
cmd='pkill -f "dhclient.*{}"'.format(iface_name),
- node_name=node_name, raise_on_err=False)
+ node_name=minion_id, raise_on_err=False)
LOG.warning('Restarting keepalived service on controllers...')
k8s_actions._salt.local(tgt='ctl*', fun='cmd.run',
@@ -180,8 +180,11 @@
if hasattr(request.node, 'rep_call') and \
(request.node.rep_call.passed or request.node.rep_call.failed)\
and cncf_publisher:
+ LOG.info("Waiting 60 sec for sonobuoy to generate results archive")
+ time.sleep(60)
+ LOG.info("Downloading sonobuoy results archive")
files = utils.extract_name_from_mark(cncf_publisher) \
- or "{}".format(func_name)
+ or "{}".format(func_name)
k8s_deployed.extract_file_to_node(
system='k8s', file_path='tmp/sonobuoy',
pod_name='sonobuoy', pod_namespace='heptio-sonobuoy'
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
index 5a43b42..3e1a45b 100644
--- a/tcp_tests/fixtures/runtest_fixtures.py
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -18,7 +18,7 @@
@pytest.fixture(scope='function')
-def tempest_actions(underlay_actions, salt_actions):
+def tempest_actions(config, underlay_actions, salt_actions):
"""
Run tempest tests
"""
@@ -28,6 +28,7 @@
domain_name = settings.DOMAIN_NAME
target = settings.TEMPEST_TARGET
runtest = RuntestManager(
+ config,
underlay_actions, salt_actions,
cluster_name=cluster_name,
domain_name=domain_name,
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 7f4ce60..226ab22 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -71,13 +71,7 @@
LOG.info("############ Executing command ####### {0}".format(commands))
salt_actions.install(commands)
- salt_nodes = salt_actions.get_ssh_data()
- config.underlay.ssh = config.underlay.ssh + \
- [node for node in salt_nodes
- if not any(node['node_name'] == n['node_name']
- for n in config.underlay.ssh)]
- underlay.config_ssh = []
- underlay.add_config_ssh(config.underlay.ssh)
+ salt_actions.update_ssh_data_from_minions()
hardware.create_snapshot(ext.SNAPSHOT.salt_deployed)
salt_actions.sync_time()
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 65677a9..a3bcea4 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -200,10 +200,6 @@
LOG.info("Config - {}".format(config))
underlay_actions.add_config_ssh(config.underlay.ssh)
- if not config.underlay.lvm:
- underlay_actions.enable_lvm(hardware.lvm_storages())
- config.underlay.lvm = underlay_actions.config_lvm
-
hardware.create_snapshot(ext.SNAPSHOT.underlay)
return underlay_actions
@@ -234,10 +230,6 @@
"region": {
"machines": macs}}}}
- if not config.underlay.lvm:
- underlay_actions.enable_lvm(hardware.lvm_storages())
- config.underlay.lvm = underlay_actions.config_lvm
-
for node in hardware.slave_nodes:
# For correct comissioning by MaaS nodes should be powered off
node.destroy()
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 19bdf08..7abc53e 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -34,8 +34,11 @@
NETWORK_TYPE = enum(
- 'private',
- 'admin'
+ 'admin',
+ 'control',
+ 'tenant',
+ 'storage',
+ 'external',
)
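
The NETWORK_TYPE enum now models the five MCP network roles instead of the old private/admin pair. For reference, a minimal self-contained sketch of how such an `enum()` helper can be built (the actual helper in tcp_tests/helpers/ext.py may differ in detail):

```
import collections


def enum(*values):
    # Hypothetical minimal variant: each name maps onto itself, so
    # NETWORK_TYPE.admin == 'admin' can be used directly as a lookup key.
    return collections.namedtuple('Enum', values)(*values)


NETWORK_TYPE = enum(
    'admin',
    'control',
    'tenant',
    'storage',
    'external',
)

print(NETWORK_TYPE.admin)   # 'admin'
print(NETWORK_TYPE.tenant)  # 'tenant'
```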
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
index dc58d9c..d3ba466 100644
--- a/tcp_tests/helpers/netchecker.py
+++ b/tcp_tests/helpers/netchecker.py
@@ -66,7 +66,7 @@
else:
assert self.get_connectivity_status().status_code == 400
- def wait_check_network(self, works, timeout=60, interval=10):
+ def wait_check_network(self, works, timeout=600, interval=10):
helpers.wait_pass(
lambda: self.check_network(works=works),
timeout=timeout,
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index f2311d4..480a646 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -356,6 +356,12 @@
return var
+ def basename(path):
+ return os.path.basename(path)
+
+ def dirname(path):
+ return os.path.dirname(path)
+
if options is None:
options = {}
options.update({'os_env': os_env, })
@@ -366,6 +372,9 @@
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader([path, os.path.dirname(path)],
followlinks=True))
+ environment.filters['basename'] = basename
+ environment.filters['dirname'] = dirname
+
template = environment.get_template(filename).render(options)
if required_env_vars and log_env_vars:
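
The two new filters expose os.path helpers to the Jinja2 templates used for rendering the underlay and config YAML. The same registration pattern in isolation:

```
import os

import jinja2

environment = jinja2.Environment()
# Register os.path helpers as template filters, mirroring the change above.
environment.filters['basename'] = os.path.basename
environment.filters['dirname'] = os.path.dirname

template = environment.from_string(
    "file: {{ path | basename }}, dir: {{ path | dirname }}")
print(template.render(path='/srv/salt/reclass/classes/init.yml'))
# file: init.yml, dir: /srv/salt/reclass/classes
```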
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 7424a49..17ad452 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -71,6 +71,7 @@
self._create_environment()
self.set_dns_config()
self.set_address_pools_config()
+ self.set_dhcp_ranges_config()
@property
def _devops_config(self):
@@ -93,39 +94,6 @@
)
self.__devops_config = conf
- def lvm_storages(self):
- """Returns a dict object of lvm storages in current environment
-
- returned data example:
- {
- "master": {
- "id": "virtio-bff72959d1a54cb19d08"
- },
- "slave-0": {
- "id": "virtio-5e33affc8fe44503839f"
- },
- "slave-1": {
- "id": "virtio-10b6a262f1ec4341a1ba"
- },
- }
-
- :rtype: dict
- """
- result = {}
- for node in self.__env.get_nodes(role__in=ext.UNDERLAY_NODE_ROLES):
- lvm = filter(lambda x: x.volume.name == 'lvm', node.disk_devices)
- if len(lvm) == 0:
- continue
- lvm = lvm[0]
- result[node.name] = {}
- result_node = result[node.name]
- result_node['id'] = "{bus}-{serial}".format(
- bus=lvm.bus,
- serial=lvm.volume.serial[:20])
- LOG.info("Got disk-id '{}' for node '{}'".format(
- result_node['id'], node.name))
- return result
-
@property
def _d_env_name(self):
"""Get environment name from fuel devops config
@@ -161,6 +129,7 @@
for d_node in self.__env.get_nodes(role__in=roles):
ssh_data = {
'node_name': d_node.name,
+ 'minion_id': d_node.name,
'roles': [d_node.role],
'address_pool': self._get_network_pool(
ext.NETWORK_TYPE.admin).address_pool.name,
@@ -308,7 +277,8 @@
self.__env.start()
LOG.info('Environment "{0}" started'.format(self.__env.name))
check_cloudinit_started = '[ -f /is_cloud_init_started ]'
- check_cloudinit_finished = '[ -f /is_cloud_init_finished ]'
+ check_cloudinit_finished = ('[ -f /is_cloud_init_finished ] || '
+ '[ -f /var/log/mcp/.bootstrap_done ]')
check_cloudinit_failed = 'cat /is_cloud_init_failed'
passed = {}
for node in self.__env.get_nodes(role__in=underlay_node_roles):
@@ -522,3 +492,14 @@
"""Store address pools CIDRs in config object"""
for ap in self.__env.get_address_pools():
self.__config.underlay.address_pools[ap.name] = ap.net
+
+ def set_dhcp_ranges_config(self):
+ """Store DHCP ranges in config object"""
+ for ap in self.__env.get_address_pools():
+ if "gateway" in ap.ip_reserved and "dhcp" in ap.ip_ranges:
+ self.__config.underlay.dhcp_ranges[ap.name] = {
+ "cidr": ap.net,
+ "start": ap.ip_range_start("dhcp"),
+ "end": ap.ip_range_end("dhcp"),
+ "gateway": ap.gateway,
+ }
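
`set_dhcp_ranges_config()` publishes every address pool that defines both a gateway and a 'dhcp' range into `config.underlay.dhcp_ranges`. A sketch of the resulting structure and how a consumer reads it back (the pool name and addresses here are illustrative; `RuntestManager.runtest_pillar` below does the same lookup with `settings.EXTERNAL_ADDRESS_POOL_NAME`):

```
# Illustrative shape of config.underlay.dhcp_ranges after
# set_dhcp_ranges_config() has run; pool name and addresses are made up.
dhcp_ranges = {
    'external-pool01': {
        'cidr': '10.90.0.0/24',
        'start': '10.90.0.10',
        'end': '10.90.0.200',
        'gateway': '10.90.0.1',
    },
}

public_net = dhcp_ranges['external-pool01']
print('gateway={gateway} cidr={cidr} range={start}-{end}'.format(**public_net))
```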
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
index c4bb57e..39fd126 100644
--- a/tcp_tests/managers/envmanager_empty.py
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -30,26 +30,6 @@
"""
self.__config = config
- def lvm_storages(self):
- """Returns data of lvm_storages on nodes in environment
-
- It's expected that data of self.__config.lvm_storages will be
- like this:
- {
- "node1": {
- "device": "vdb"
- },
- "node2": {
- "device": "vdb"
- },
- "node3": {
- "device": "vdb"
- },
- }
- :rtype: dict
- """
- return self.__config.underlay.lvm
-
def get_ssh_data(self, roles=None):
raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
"Please provide SSH details in config.underlay.ssh")
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index adb76dc..6dcf615 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -87,6 +87,7 @@
retry_count = retry.get('count', 1)
retry_delay = retry.get('delay', 1)
skip_fail = step.get('skip_fail', False)
+ timeout = step.get('timeout', None)
with self.__underlay.remote(node_name=node_name) as remote:
@@ -101,7 +102,7 @@
LOG.info("\n\n{0}\n{1}".format(
msg + retry_msg, '=' * len(msg + retry_msg)))
- result = remote.execute(cmd, verbose=True)
+ result = remote.execute(cmd, timeout=timeout, verbose=True)
if return_res:
return result
@@ -114,6 +115,9 @@
failed += 1
if 'Minion did not return. [Not connected]' in s:
failed += 1
+ if ('Salt request timed out. The master is not responding.'
+ in s):
+ failed += 1
if s.startswith("[CRITICAL]"):
failed += 1
if 'Fatal' in s:
@@ -131,8 +135,17 @@
if x == 1 and skip_fail is False:
# In the last retry iteration, raise an exception
- raise Exception("Step '{0}' failed"
- .format(description))
+ raise Exception("Step '{0}' failed:\n"
+ "=============== Command: ==============\n"
+ "{1}\n"
+ "=============== STDOUT: ===============\n"
+ "{2}\n"
+ "=============== STDERR: ===============\n"
+ "{3}\n"
+ .format(description,
+ cmd,
+ result.stdout_str,
+ result.stderr_str))
def command2(self, step, msg):
# Required fields
@@ -148,6 +161,7 @@
retry_count = retry.get('count', 1)
retry_delay = retry.get('delay', 1)
skip_fail = step.get('skip_fail', False)
+ timeout = step.get('timeout', None)
if not bool(state) ^ bool(states):
raise ValueError("You should use state or states in step")
@@ -165,7 +179,7 @@
method = getattr(self._salt, self._salt._map[do])
command_ret = method(tgt=target, state=state or states,
- args=args, kwargs=kwargs)
+ args=args, kwargs=kwargs, timeout=timeout)
command_ret = command_ret if \
isinstance(command_ret, list) else [command_ret]
results = [(r['return'][0], f) for r, f in command_ret]
@@ -225,7 +239,8 @@
result = {}
with self.__underlay.local() as local:
- result = local.execute('cd {0} && find . -type f -name "{1}"'
+ result = local.execute('cd {0} && find . -maxdepth 1 -type f'
+ ' -name "{1}"'
.format(local_path, local_filename))
LOG.info("Found files to upload:\n{0}".format(result))
@@ -269,7 +284,7 @@
with self.__underlay.remote(node_name=node_name) as remote:
- result = remote.execute('find {0} -type f -name {1}'
+ result = remote.execute('find {0} -maxdepth 1 -type f -name {1}'
.format(remote_path, remote_filename))
LOG.info("Found files to download:\n{0}".format(result))
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index fbd5c43..afc8900 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -143,8 +143,8 @@
building,
timeout=timeout,
interval=interval,
- timeout_msg='Timeout waiting, job {0} are not finished "{1}" build'
- ' still'.format(name, build_id))
+ timeout_msg=('Timeout waiting for the job {0}:{1} for {2} sec.'
+ .format(name, build_id, timeout)))
def get_build_output(self, name, build_id):
return self.__client.get_build_console_output(name, build_id)
diff --git a/tcp_tests/managers/k8s/cluster.py b/tcp_tests/managers/k8s/cluster.py
index db7bb18..1531f0d 100644
--- a/tcp_tests/managers/k8s/cluster.py
+++ b/tcp_tests/managers/k8s/cluster.py
@@ -92,6 +92,7 @@
self.api_autoscaling = client.AutoscalingV1Api(api_client)
self.api_batch = client.BatchV1Api(api_client)
self.api_rbac_auth = client.RbacAuthorizationV1Api(api_client)
+ self.api_version = client.VersionApi(api_client)
self.nodes = K8sNodeManager(self)
self.pods = K8sPodManager(self)
diff --git a/tcp_tests/managers/k8s/ingresses.py b/tcp_tests/managers/k8s/ingresses.py
index 906dc31..5dd353c 100644
--- a/tcp_tests/managers/k8s/ingresses.py
+++ b/tcp_tests/managers/k8s/ingresses.py
@@ -14,6 +14,8 @@
from kubernetes import client
+from devops.helpers import helpers
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
@@ -41,6 +43,12 @@
self._manager.api.delete_namespaced_ingress(
self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+ def wait_ready(self, timeout=120, interval=2):
+ helpers.wait(
+ lambda: self.read().status.load_balancer.ingress is not None,
+ timeout=timeout, interval=interval)
+ return self
+
class K8sIngressManager(K8sBaseManager):
resource_class = K8sIngress
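
`wait_ready()` polls the ingress until Kubernetes publishes a load-balancer address, which only happens once MetalLB or another LB provider is active. A hypothetical usage sketch (manager and manifest setup are simplified placeholders):

```
def expose_and_wait(cluster, body):
    """Hypothetical usage; 'cluster' is a K8sCluster and 'body' a prepared
    ingress manifest. wait_ready() returns the resource, so calls chain."""
    ingress = cluster.ingresses.create(name='hello-ingress', body=body)
    ingress.wait_ready(timeout=120, interval=2)
    # After wait_ready() the status carries at least one LB endpoint.
    lb = ingress.read().status.load_balancer.ingress[0]
    return lb.ip or lb.hostname
```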
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 96d60a0..79974d3 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -112,6 +112,41 @@
names.sort()
return names[0]
+ @property
+ def controller_minion_id(self):
+ """ Return node name of controller node that used for all actions """
+ minion_ids = [minion_id['minion_id'] for minion_id in
+ self.get_controllers()]
+ # we want to return the same controller every time
+ minion_ids.sort()
+ return minion_ids[0]
+
+ @property
+ def is_metallb_enabled(self):
+ ctl_tgt = self.controller_minion_id
+ LOG.debug("Controller target: {}".format(ctl_tgt))
+
+ result = self._salt.get_pillar(
+ tgt=ctl_tgt,
+ pillar='kubernetes:common:addons:metallb:enabled')
+ metallb = result[0].get(ctl_tgt, False)
+ LOG.info("{} kubernetes:common:addons:metallb:enabled: {}"
+ .format(ctl_tgt, bool(metallb)))
+ return metallb
+
+ @property
+ def is_ingress_nginx_enabled(self):
+ ctl_tgt = self.controller_minion_id
+ LOG.debug("Controller target: {}".format(ctl_tgt))
+
+ result = self._salt.get_pillar(
+ tgt=ctl_tgt,
+ pillar='kubernetes:common:addons:ingress-nginx:enabled')
+ ingress_nginx = result[0].get(ctl_tgt, False)
+ LOG.info("{} kubernetes:common:addons:ingress-nginx:enabled: {}"
+ .format(ctl_tgt, bool(ingress_nginx)))
+ return ingress_nginx
+
def controller_check_call(self, cmd, **kwargs):
""" Run command on controller and return result """
LOG.info("running cmd on k8s controller: {}".format(cmd))
@@ -192,31 +227,32 @@
image=self.__config.k8s.k8s_conformance_image)
return self.__underlay.check_call(
cmd=cmd, node_name=node_name, timeout=timeout,
- raise_on_err=raise_on_err)
+ raise_on_err=raise_on_err, verbose=True)
def run_virtlet_conformance(self, timeout=60 * 120,
- log_file='virtlet_conformance.log'):
+ log_file='virtlet_conformance.log',
+ report_name="report.xml"):
if self.__config.k8s.run_extended_virtlet_conformance:
ci_image = "cloud-images.ubuntu.com/xenial/current/" \
"xenial-server-cloudimg-amd64-disk1.img"
cmd = ("set -o pipefail; "
"docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -junitOutput report.xml "
+ "-include-cloud-init-tests -junitOutput {3} "
"-image {2} -sshuser ubuntu -memoryLimit 1024 "
"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
"-ginkgo.focus '\[Conformance\]' "
"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file, ci_image))
+ log_file, ci_image, report_name))
else:
cmd = ("set -o pipefail; "
"docker run --net=host {0} /virtlet-e2e-tests "
- "-junitOutput report.xml "
+ "-junitOutput {2} "
"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
"-ginkgo.focus '\[Conformance\]' "
"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file))
+ log_file, report_name))
LOG.info("Executing: {}".format(cmd))
with self.__underlay.remote(
node_name=self.controller_name) as remote:
@@ -227,7 +263,7 @@
LOG.info("Test results stderr: {}".format(stderr))
return result
- def start_k8s_cncf_verification(self, timeout=60 * 90):
+ def start_k8s_cncf_verification(self, timeout=60 * 180):
"""
Build sonobuoy using golang docker image and install it in system
Then generate sonobuoy verification manifest using gen command
@@ -259,7 +295,7 @@
LOG.info("Waiting for CNCF to complete")
helpers.wait(
lambda: sonobuoy_status() == 'complete',
- interval=30, timeout=timeout,
+ interval=120, timeout=timeout,
timeout_msg="Timeout for CNCF reached."
)
@@ -315,6 +351,7 @@
r.check_call(cmd, raise_on_err=False)
LOG.info("Downloading the artifact {0}".format(log_file))
r.download(destination=log_file, target=os.getcwd())
+ self.store_server_version(os.path.join(os.getcwd(), 'env_k8s_version'))
def combine_xunit(self, path, output):
"""
@@ -379,6 +416,26 @@
LOG.debug("{0}\nresult:\n{1}".format(cmd, result['stdout']))
return result['stdout']
+ def store_server_version(self, env_file_path):
+ """Store Kubernetes server version in bash source file"""
+
+ def digits(string):
+ return ''.join(n for n in string if n.isdigit())
+
+ ver = self.api.api_version.get_code()
+ LOG.debug("Got Kubernetes server version:\n{0}".format(ver))
+
+ env_version = ("export KUBE_SERVER_VERSION={0}.{1}\n"
+ "export KUBE_SERVER_GIT_VERSION={2}\n"
+ .format(digits(ver.major),
+ digits(ver.minor),
+ ver.git_version))
+
+ LOG.info("Kubernetes server version is stored to {0}:\n{1}"
+ .format(env_file_path, env_version))
+ with open(env_file_path, 'w') as kver:
+ kver.write(env_version)
+
class K8SKubectlCli(object):
""" Contain kubectl cli commands and api wrappers"""
@@ -450,8 +507,6 @@
def get_pod_dom_uuid(self, pod):
uuid_name_map = self.virtlet_execute(
pod.read().spec.node_name, 'virsh list --uuid --name')['stdout']
- LOG.info("HEHEHEH {}".format(uuid_name_map))
- LOG.info("MDAMDMAD {}".format(pod.name))
for line in uuid_name_map:
if line.rstrip().endswith("-{}".format(pod.name)):
return line.split(" ")[0]
@@ -502,6 +557,7 @@
def expose(self, service_type='ClusterIP'):
service_name = "{0}-s{1}".format(self._deployment.name, self._index)
+ self._index += 1
self._svc = self._manager.kubectl.expose(
self._deployment, port=self._port,
service_name=service_name, service_type=service_type)
@@ -518,3 +574,9 @@
def is_service_available(self, svc=None, external=False):
return "Hello Kubernetes!" in self.curl(svc, external=external)
+
+ def delete(self):
+ for svc in self._manager.api.services.list_all(
+ name_prefix="{}-s".format(self._deployment.name)):
+ svc.delete()
+ self._deployment.delete()
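
`store_server_version()` strips non-digit characters from the reported major/minor version (so a minor like '11+' still yields a clean number) before writing a bash-sourceable env file. The transformation in isolation, with illustrative values of the shape returned by `client.VersionApi(...).get_code()`:

```
def digits(string):
    # Keep only digit characters: '11+' -> '11', '1' -> '1'.
    return ''.join(n for n in string if n.isdigit())


# Values of this shape come from client.VersionApi(...).get_code();
# the concrete numbers here are illustrative.
major, minor, git_version = '1', '11+', 'v1.11.3-2'

env_version = ("export KUBE_SERVER_VERSION={0}.{1}\n"
               "export KUBE_SERVER_GIT_VERSION={2}\n"
               .format(digits(major), digits(minor), git_version))
print(env_version)
# export KUBE_SERVER_VERSION=1.11
# export KUBE_SERVER_GIT_VERSION=v1.11.3-2
```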
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index 5b7fd4c..589e1ee 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -97,7 +97,7 @@
" docker pull {image}:{version}".format(image=image,
version=version))
self._underlay.check_call(cmd, node_name=self._node_name)
- except Exception as e:
+ except Exception:
LOG.debug('Cannot install docker-ce')
cmd = ("apt-get -y install docker.io &&"
" docker pull {image}:{version}".format(image=image,
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index c400556..e7fc15c 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -14,82 +14,17 @@
import json
import os
-import time
from devops.helpers import helpers
from tcp_tests import logger
from tcp_tests import settings
+
LOG = logger.logger
TEMPEST_CFG_DIR = '/tmp/test'
-CONFIG = {
- 'classes': ['service.runtest.tempest',
- 'service.runtest.tempest.services.manila.glance'],
- 'parameters': {
- '_param': {
- 'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
- 'runtest_tempest_cfg_name': 'tempest.conf',
- 'runtest_tempest_public_net': 'net04_ext',
- 'tempest_test_target': 'gtw01*'
- },
- 'neutron': {
- 'client': {
- 'enabled': True
- }
- },
- 'runtest': {
- 'enabled': True,
- 'keystonerc_node': 'ctl01*',
- 'tempest': {
- 'enabled': True,
- 'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
- 'cfg_name': '${_param:runtest_tempest_cfg_name}',
- 'DEFAULT': {
- 'log_file': 'tempest.log'
- },
- 'compute': {
- 'build_timeout': 600,
- 'max_microversion': 2.53,
- 'min_compute_nodes': 2,
- 'min_microversion': 2.1,
- 'volume_device_name': 'vdc'
- },
- 'convert_to_uuid': {
- 'network': {
- 'public_network_id':
- '${_param:runtest_tempest_public_net}'
- }
- },
- 'dns_feature_enabled': {
- 'api_admin': False,
- 'api_v1': False,
- 'api_v2': True,
- 'api_v2_quotas': True,
- 'api_v2_root_recordsets': True,
- 'bug_1573141_fixed': True
- },
- 'heat_plugin': {
- 'floating_network_name':
- '${_param:runtest_tempest_public_net}'
- },
- 'network': {
- 'floating_network_name':
- '${_param:runtest_tempest_public_net}'
- },
- 'share': {
- 'capability_snapshot_support': True,
- 'run_driver_assisted_migration_tests': False,
- 'run_manage_unmanage_snapshot_tests': False,
- 'run_manage_unmanage_tests': False,
- 'run_migration_with_preserve_snapshots_tests': False,
- 'run_quota_tests': True,
- 'run_replication_tests': False,
- 'run_snapshot_tests': True,
- }}}}}
-
class RuntestManager(object):
"""Helper manager for execution tempest via runtest-formula"""
@@ -98,60 +33,121 @@
image_version = settings.TEMPEST_IMAGE_VERSION
container_name = 'run-tempest-ci'
master_host = "cfg01"
- master_tgt = "{}*".format(master_host)
+ control_host = "ctl01"
+ compute_host = "cmp"
class_name = "runtest"
run_cmd = '/bin/bash -c "run-tempest"'
- def __init__(self, underlay, salt_api, cluster_name,
+ def __init__(self, config, underlay, salt_api, cluster_name,
domain_name, tempest_threads,
tempest_pattern=settings.TEMPEST_PATTERN,
run_cmd=None, target='gtw01'):
+ self.__config = config
self.underlay = underlay
self.__salt_api = salt_api
- self.target = target
self.cluster_name = cluster_name
self.domain_name = domain_name
self.tempest_threads = tempest_threads
self.tempest_pattern = tempest_pattern
self.run_cmd = run_cmd or self.run_cmd
+ self.target_name = self.underlay.get_target_node_names(target)[0]
+ self.master_name = self.underlay.get_target_node_names(
+ self.master_host)[0]
+ self.control_name = self.underlay.get_target_node_names(
+ self.control_host)[0]
+ self.compute_name = self.underlay.get_target_node_names(
+ self.compute_host)[0]
+ self.barbican = False
@property
def salt_api(self):
return self.__salt_api
- def install_python_lib(self):
- return self.salt_api.local(
- "{}*".format(self.target),
- 'pip.install', 'docker'), None
+ @property
+ def runtest_pillar(self):
+ public_net = self.__config.underlay.dhcp_ranges[
+ settings.EXTERNAL_ADDRESS_POOL_NAME]
+ public_gateway = public_net["gateway"].encode("ascii")
+ public_cidr = public_net["cidr"].encode("ascii")
+ public_allocation_start = public_net["start"].encode("ascii")
+ public_allocation_end = public_net["end"].encode("ascii")
+ tempest_test_target = self.target_name.encode("ascii") + "*"
- def run_salt_minion_state(self):
- return self.salt_api.local('cfg01*', 'state.sls', 'salt.minion')
+ pillar = {
+ 'classes': ['service.runtest.tempest',
+ 'service.runtest.tempest.public_net',
+ 'service.runtest.tempest.services.manila.glance'],
+ 'parameters': {
+ '_param': {
+ 'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
+ 'runtest_tempest_cfg_name': 'tempest.conf',
+ 'runtest_tempest_public_net': 'public',
+ 'openstack_public_neutron_subnet_gateway': public_gateway,
+ 'openstack_public_neutron_subnet_cidr': public_cidr,
+ 'openstack_public_neutron_subnet_allocation_start':
+ public_allocation_start,
+ 'openstack_public_neutron_subnet_allocation_end':
+ public_allocation_end,
+ 'tempest_test_target': tempest_test_target,
+ 'glance_image_cirros_location':
+ 'http://cz8133.bud.mirantis.net:8099'
+ '/cirros-0.3.5-x86_64-disk.img',
+ 'glance_image_fedora_location':
+ 'http://cz8133.bud.mirantis.net:8099'
+ '/Fedora-Cloud-Base-27-1.6.x86_64.qcow2',
+ 'glance_image_manila_location':
+ 'http://cz8133.bud.mirantis.net:8099'
+ '/manila-service-image-master.qcow2',
+ },
+ 'neutron': {
+ 'client': {
+ 'enabled': True
+ }
+ },
+ 'runtest': {
+ 'enabled': True,
+ 'keystonerc_node': 'ctl01*',
+ 'tempest': {
+ 'enabled': True,
+ 'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
+ 'cfg_name': '${_param:runtest_tempest_cfg_name}',
+ 'put_keystone_rc_enabled': True,
+ 'put_local_image_file_enabled': False,
+ 'DEFAULT': {
+ 'log_file': 'tempest.log'
+ },
+ 'compute': {
+ 'min_compute_nodes': 2,
+ },
+ 'convert_to_uuid': {
+ 'network': {
+ 'public_network_id':
+ '${_param:runtest_tempest_public_net}'
+ }
+ },
+ 'heat_plugin': {
+ 'build_timeout': '600'
+ },
+ 'share': {
+ 'capability_snapshot_support': True,
+ 'run_driver_assisted_migration_tests': False,
+ 'run_manage_unmanage_snapshot_tests': False,
+ 'run_manage_unmanage_tests': False,
+ 'run_migration_with_preserve_snapshots_tests':
+ False,
+ 'run_quota_tests': True,
+ 'run_replication_tests': False,
+ 'run_snapshot_tests': True,
+ }}}}}
- def create_networks(self):
- return self.salt_api.local('cfg01*', 'state.sls', 'neutron.client')
+ if self.barbican:
+ pillar['classes'].append('service.runtest.tempest.barbican')
- def create_flavors(self):
- return self.salt_api.local('cfg01*', 'state.sls', 'nova.client')
-
- def set_property(self):
- return self.salt_api.local(
- tgt='ctl01*',
- fun='cmd.run',
- args='. /root/keystonercv3; openstack '
- 'flavor set m1.tiny_test '
- '--property hw:mem_page_size=small')
-
- def create_cirros(self):
- return self.salt_api.local('cfg01*', 'state.sls', 'glance.client')
-
- def generate_config(self):
- return self.salt_api.local('cfg01*', 'state.sls', 'runtest')
+ return pillar
def fetch_arficats(self, username=None, file_format='xml'):
- target_name = next(node_name for node_name
- in self.underlay.node_names() if
- self.target in node_name)
- with self.underlay.remote(node_name=target_name, username=None) as tgt:
+ with self.underlay.remote(node_name=self.target_name,
+ username=None) as tgt:
result = tgt.execute('find {} -name "report_*.{}"'.format(
TEMPEST_CFG_DIR, file_format))
LOG.debug("Find result {0}".format(result))
@@ -163,112 +159,191 @@
destination=report, # noqa
target=os.getcwd())
- def store_runtest_model(self, config=CONFIG):
- master_name = next(node_name for node_name
- in self.underlay.node_names() if
- self.master_host in node_name)
+ def store_runtest_model(self, runtest_pillar=None):
with self.underlay.yaml_editor(
file_path="/srv/salt/reclass/classes/cluster/"
"{cluster_name}/infra/"
"{class_name}.yml".format(
cluster_name=self.cluster_name,
class_name=self.class_name),
- node_name=master_name) as editor:
- editor.content = config
+ node_name=self.master_name) as editor:
+ editor.content = runtest_pillar or self.runtest_pillar
with self.underlay.yaml_editor(
file_path="/srv/salt/reclass/nodes/_generated/"
"cfg01.{domain_name}.yml".format(
domain_name=self.domain_name),
- node_name=master_name) as editor:
+ node_name=self.master_name) as editor:
editor.content['classes'].append(
'cluster.{cluster_name}.infra.{class_name}'.format(
cluster_name=self.cluster_name,
class_name=self.class_name))
- self.salt_api.local('*', 'saltutil.refresh_pillar')
- self.salt_api.local('*', 'saltutil.sync_all')
-
def save_runtime_logs(self, logs=None, inspect=None):
if logs:
with open("{path}/{target}_tempest_run.log".format(
- path=settings.LOGS_DIR, target=self.target), 'w') as f:
+ path=settings.LOGS_DIR,
+ target=self.target_name), 'w') as f:
LOG.info("Save tempest console log")
container_log = logs
f.write(container_log.encode('ascii', 'ignore'))
if inspect:
with open("{path}/{target}_tempest_container_info.json.log".format(
- path=settings.LOGS_DIR, target=self.target), 'w') as f:
+ path=settings.LOGS_DIR,
+ target=self.target_name), 'w') as f:
LOG.info("Save tempest container inspect data")
container_inspect = json.dumps(inspect,
indent=4, sort_keys=True)
f.write(container_inspect)
- def prepare(self, dpdk=None):
+ def prepare(self):
+ barbican_pillar = "nova:controller:barbican:enabled"
+ result = self.__salt_api.get_pillar(tgt=self.control_name,
+ pillar=barbican_pillar)
+ self.barbican = result[0].get(self.control_name, False)
self.store_runtest_model()
+ cirros_pillar = ("salt-call --out=newline_values_only "
+ "pillar.get "
+ "glance:client:identity:"
+ "admin_identity:image:cirros:location")
+ dpdk_pillar = "linux:network:dpdk:enabled"
+ salt_cmd = "salt -l info --hard-crash --state-output=mixed "
+ salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "
- res = self.install_python_lib()
- LOG.info(json.dumps(res, indent=4))
+ result = self.__salt_api.get_pillar(tgt=self.compute_name,
+ pillar=dpdk_pillar)
- res = self.run_salt_minion_state()
- LOG.info(json.dumps(res, indent=4))
- time.sleep(20)
+ dpdk = result[0].get(self.compute_name, False)
+ LOG.info("DPDK enabled: {}".format(bool(dpdk)))
- res = self.create_networks()
- LOG.info(json.dumps(res, indent=4))
- time.sleep(20)
+ commands = [
+ {
+ 'description': "Sync salt objects for runtest model",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_cmd + "'*' saltutil.refresh_pillar && " +
+ salt_cmd + "'*' saltutil.sync_all")},
+ {
+ 'description': ("Install docker.io package and "
+ "enable packets forwarding"),
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " pkg.install docker.io && " +
+ " iptables --policy FORWARD ACCEPT")},
+ {
+ 'description': "Install PyPI docker package",
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " pip.install setuptools && " +
+ salt_call_cmd + " pip.install docker")},
+ {
+ 'description': "Run salt.minion state for runtest formula",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " state.sls salt.minion && "
+ " sleep 20")},
+ {
+ 'description': "Enforce keystone state for neutronv2",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " state.sls keystone.client")},
+ {
+ 'description': "Create networks for Tempest tests",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " state.sls neutron.client")},
+ {
+ 'description': "Create flavors for Tempest tests",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " state.sls nova.client")},
+ {
+ 'description': "Upload images for Tempest",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " state.sls glance.client")},
+ {
+ 'description': "Generate config for Tempest",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " state.sls runtest")},
+ {
+ 'description': "Upload cirros image",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;"
+ "cirros_url=$({}) && {} '{}' cmd.run "
+ "\"wget $cirros_url -O /tmp/TestCirros-0.3.5.img\""
+ .format(cirros_pillar, salt_cmd, self.target_name))},
+ ]
- res = self.create_flavors()
- LOG.info(json.dumps(res, indent=4))
- time.sleep(20)
if dpdk:
- res = self.set_property()
- LOG.info('Update flavor property')
- LOG.info(json.dumps(res, indent=4))
- time.sleep(20)
+ commands.append({
+ 'description': "Configure flavor for DPDK",
+ 'node_name': self.control_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd + " cmd.run "
+ " '. /root/keystonercv3;"
+ " openstack flavor set m1.extra_tiny_test"
+ " --property hw:mem_page_size=any;"
+ " openstack flavor set m1.tiny_test"
+ " --property hw:mem_page_size=any'")},
+ )
- res = self.create_cirros()
- LOG.info(json.dumps(res, indent=4))
- time.sleep(20)
+ if self.barbican:
+ commands.append({
+ 'description': "Configure barbican",
+ 'node_name': self.master_name,
+ 'cmd': ("set -ex;" +
+ salt_call_cmd +
+ " state.sls barbican.client && " +
+ salt_call_cmd +
+ " state.sls runtest.test_accounts && " +
+ salt_call_cmd +
+ " state.sls runtest.barbican_sign_image")},
+ )
- res = self.generate_config()
- LOG.info(json.dumps(res, indent=4))
- time.sleep(20)
+ self.__salt_api.execute_commands(commands=commands,
+ label="Prepare for Tempest")
def run_tempest(self, timeout=600):
- tgt = "{}*".format(self.target)
- params = {
- "name": self.container_name,
- "image": "{}:{}".format(self.image_name, self.image_version),
- "environment": {
- "ARGS": "-r {tempest_pattern} -w "
- "{tempest_threads} ".format(
- tempest_pattern=self.tempest_pattern,
- tempest_threads=self.tempest_threads) # noqa
- },
- "binds": [
- "{cfg_dir}/tempest.conf:/etc/tempest/tempest.conf".format(cfg_dir=TEMPEST_CFG_DIR), # noqa
- "/tmp/:/tmp/",
- "{cfg_dir}:/root/tempest".format(cfg_dir=TEMPEST_CFG_DIR),
- "/etc/ssl/certs/:/etc/ssl/certs/"
- ],
- "auto_remove": False,
- "cmd": self.run_cmd
- }
+ tgt = self.target_name
+ image_nameversion = "{}:{}".format(self.image_name, self.image_version)
- res = self.salt_api.local(tgt, 'dockerng.pull', "{}:{}".format(
- self.image_name, self.image_version))
- LOG.info("Tempest image has beed pulled- \n{}".format(
- json.dumps(res, indent=4)))
+ docker_args = (
+ " -t "
+ " --name {container_name} "
+ " -e ARGS=\"-r {tempest_pattern} -w {tempest_threads}\""
+ " -v {cfg_dir}/tempest.conf:/etc/tempest/tempest.conf"
+ " -v /tmp/:/tmp/"
+ " -v {cfg_dir}:/root/tempest"
+ " -v /etc/ssl/certs/:/etc/ssl/certs/"
+ " -d "
+ " {image_nameversion} {run_cmd}"
+ .format(
+ container_name=self.container_name,
+ image_nameversion=image_nameversion,
+ tempest_pattern=self.tempest_pattern,
+ tempest_threads=self.tempest_threads,
+ cfg_dir=TEMPEST_CFG_DIR,
+ run_cmd=self.run_cmd,
+ ))
- res = self.salt_api.local(tgt, 'dockerng.create', kwargs=params)
- LOG.info("Tempest container has been created - \n{}".format(
- json.dumps(res, indent=4)))
+ commands = [
+ {
+ 'description': "Run Tempest tests {0}".format(
+ image_nameversion),
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ " docker rm --force {container_name} || true;"
+ " docker run {docker_args}"
+ .format(container_name=self.container_name,
+ docker_args=docker_args)),
+ 'timeout': timeout},
+ ]
- res = self.salt_api.local(tgt, 'dockerng.start', self.container_name)
- LOG.info("Tempest container has been started - \n{}".format(
- json.dumps(res, indent=4)))
+ self.__salt_api.execute_commands(commands=commands,
+ label="Run Tempest tests")
def wait_status(s):
inspect_res = self.salt_api.local(tgt,
@@ -294,18 +369,11 @@
self.container_name)
inspect = inspect_res['return'][0]
inspect = next(inspect.iteritems())[1]
- if inspect['State']['ExitCode'] != 0:
- LOG.error("Tempest running failed")
- LOG.info("Tempest tests have been finished - \n{}".format(
- json.dumps(res, indent=4)))
-
logs_res = self.salt_api.local(tgt,
'dockerng.logs',
self.container_name)
logs = logs_res['return'][0]
logs = next(logs.iteritems())[1]
- LOG.info("Tempest result - \n{}".format(
- logs.encode('ascii', 'ignore')))
res = self.salt_api.local(tgt, 'dockerng.rm', self.container_name)
LOG.info("Tempest container was removed".format(
@@ -314,12 +382,12 @@
return {'inspect': inspect,
'logs': logs}
- def prepare_and_run_tempest(self, username='root', dpdk=None):
+ def prepare_and_run_tempest(self, username='root'):
"""
Run tempest tests
"""
tempest_timeout = settings.TEMPEST_TIMEOUT
- self.prepare(dpdk=dpdk)
+ self.prepare()
test_res = self.run_tempest(tempest_timeout)
self.fetch_arficats(username=username)
self.save_runtime_logs(**test_res)
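
The rewritten RuntestManager drops the ad-hoc `salt_api.local()` calls and `time.sleep()` pauses in favour of a declarative step list handed to `execute_commands()`, and starts Tempest with a plain `docker run` instead of the `dockerng` salt module. The step-list pattern in miniature (node names are placeholders):

```
# Minimal sketch of the declarative step list now used by
# RuntestManager.prepare(); node names are placeholders.
salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "

commands = [
    {'description': "Sync salt objects for runtest model",
     'node_name': 'cfg01.example.local',
     'cmd': "set -ex; salt '*' saltutil.refresh_pillar && "
            "salt '*' saltutil.sync_all"},
    {'description': "Generate config for Tempest",
     'node_name': 'cfg01.example.local',
     'cmd': "set -ex;" + salt_call_cmd + " state.sls runtest"},
]

# The real code hands this list to the salt manager:
#   self.__salt_api.execute_commands(commands=commands,
#                                    label="Prepare for Tempest")
for step in commands:
    print("{description}: {cmd}".format(**step))
```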
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index b5d5f04..a468b02 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -104,11 +104,12 @@
self.__session_start = login()
return self.__api
- def local(self, tgt, fun, args=None, kwargs=None):
- return self.api.local(tgt, fun, args, kwargs, expr_form='compound')
+ def local(self, tgt, fun, args=None, kwargs=None, timeout=None):
+ return self.api.local(tgt, fun, args, kwargs, timeout=timeout,
+ expr_form='compound')
- def local_async(self, tgt, fun, args=None, kwargs=None):
- return self.api.local_async(tgt, fun, args, kwargs)
+ def local_async(self, tgt, fun, args=None, kwargs=None, timeout=None):
+ return self.api.local_async(tgt, fun, args, kwargs, timeout=timeout)
def lookup_result(self, jid):
return self.api.lookup_jid(jid)
@@ -138,25 +139,27 @@
return fails if fails else None
- def enforce_state(self, tgt, state, args=None, kwargs=None):
- r = self.local(tgt=tgt, fun='state.sls', args=state)
+ def enforce_state(self, tgt, state, args=None, kwargs=None, timeout=None):
+ r = self.local(tgt=tgt, fun='state.sls', args=state, timeout=timeout)
f = self.check_result(r)
return r, f
- def enforce_states(self, tgt, state, args=None, kwargs=None):
+ def enforce_states(self, tgt, state, args=None, kwargs=None, timeout=None):
rets = []
for s in state:
- r = self.enforce_state(tgt=tgt, state=s)
+ r = self.enforce_state(tgt=tgt, state=s, timeout=timeout)
rets.append(r)
return rets
- def run_state(self, tgt, state, args=None, kwargs=None):
- return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs), None
+ def run_state(self, tgt, state, args=None, kwargs=None, timeout=None):
+ return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs,
+ timeout=timeout), None
- def run_states(self, tgt, state, args=None, kwargs=None):
+ def run_states(self, tgt, state, args=None, kwargs=None, timeout=None):
rets = []
for s in state:
- r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs)
+ r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs,
+ timeout=timeout)
rets.append(r)
return rets
@@ -185,13 +188,14 @@
if len(hosts) == 0:
raise LookupError("Hosts is empty or absent")
- def host(node_name, ip):
+ def host(minion_id, ip):
return {
'roles': ['salt_minion'],
'keys': [
k['private'] for k in self.__config.underlay.ssh_keys
],
- 'node_name': node_name,
+ 'node_name': minion_id,
+ 'minion_id': minion_id,
'host': ip,
'address_pool': pool_name,
'login': settings.SSH_NODE_CREDENTIALS['login'],
@@ -213,6 +217,25 @@
host_list={k: v['ipv4'] for k, v in hosts.items()}))
raise StopIteration(msg)
+ def update_ssh_data_from_minions(self):
+ """Combine existing underlay.ssh with VCP salt minions"""
+ salt_nodes = self.get_ssh_data()
+
+ for salt_node in salt_nodes:
+ nodes = [n for n in self.__config.underlay.ssh
+ if salt_node['host'] == n['host']
+ and salt_node['address_pool'] == n['address_pool']]
+ if nodes:
+ # Assume that there can be only one node with such IP address
+ # Just update minion_id for this node
+ nodes[0]['minion_id'] = salt_node['minion_id']
+ else:
+ # New node, add to config.underlay.ssh
+ self.__config.underlay.ssh.append(salt_node)
+
+ self.__underlay.config_ssh = []
+ self.__underlay.add_config_ssh(self.__config.underlay.ssh)
+
def service_status(self, tgt, service):
result = self.local(tgt=tgt, fun='service.status', args=service)
return result['return']
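
`update_ssh_data_from_minions()` reconciles the static `underlay.ssh` records with the minions reported by salt: records are matched on `(host, address_pool)`, matched entries only gain a `minion_id`, and unmatched minions are appended as new nodes. The merge rule as a standalone sketch with illustrative data:

```
# Standalone sketch of the merge rule in update_ssh_data_from_minions();
# all records here are illustrative.
underlay_ssh = [
    {'node_name': 'cfg01', 'host': '10.70.0.15', 'address_pool': 'admin-pool01'},
]
salt_nodes = [
    {'node_name': 'cfg01.local', 'minion_id': 'cfg01.local',
     'host': '10.70.0.15', 'address_pool': 'admin-pool01'},
    {'node_name': 'prx01.local', 'minion_id': 'prx01.local',
     'host': '10.70.0.16', 'address_pool': 'admin-pool01'},
]

for salt_node in salt_nodes:
    matches = [n for n in underlay_ssh
               if salt_node['host'] == n['host']
               and salt_node['address_pool'] == n['address_pool']]
    if matches:
        # Known host: only attach the discovered minion_id.
        matches[0]['minion_id'] = salt_node['minion_id']
    else:
        # New VCP node: register the whole record.
        underlay_ssh.append(salt_node)

print([(n['node_name'], n['minion_id']) for n in underlay_ssh])
# [('cfg01', 'cfg01.local'), ('prx01.local', 'prx01.local')]
```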
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 3bb0a1f..f3ccef8 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -41,7 +41,6 @@
def install(self, commands, label='Install SL services'):
self.execute_commands(commands, label=label)
self.__config.stack_light.stacklight_installed = True
- self.__config.stack_light.sl_vip_host = self.get_sl_vip()
def get_sl_vip(self):
tgt = 'I@prometheus:server:enabled:True'
@@ -76,6 +75,7 @@
@property
def api(self):
if self._p_client is None:
+ self.__config.stack_light.sl_vip_host = self.get_sl_vip()
self._p_client = prometheus_client.PrometheusClient(
host=self.__config.stack_light.sl_vip_host,
port=self.__config.stack_light.sl_prometheus_port,
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index ee23654..66f686b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -39,6 +39,7 @@
[
{
node_name: node1,
+ minion_id: node1.local,
address_pool: 'public-pool01',
host: ,
port: ,
@@ -50,6 +51,7 @@
},
{
node_name: node1,
+ minion_id: node1.local,
address_pool: 'private-pool01',
host:
port:
@@ -61,6 +63,7 @@
},
{
node_name: node2,
+ minion_id: node2.local,
address_pool: 'public-pool01',
keys_source_host: node1
...
@@ -75,7 +78,6 @@
"""
__config = None
config_ssh = None
- config_lvm = None
def __init__(self, config):
"""Read config.underlay.ssh object
@@ -86,9 +88,6 @@
if self.config_ssh is None:
self.config_ssh = []
- if self.config_lvm is None:
- self.config_lvm = {}
-
self.add_config_ssh(self.__config.underlay.ssh)
def add_config_ssh(self, config_ssh):
@@ -100,6 +99,7 @@
ssh_data = {
# Required keys:
'node_name': ssh['node_name'],
+ 'minion_id': ssh['minion_id'],
'host': ssh['host'],
'login': ssh['login'],
'password': ssh['password'],
@@ -126,6 +126,7 @@
ssh_data = {
# Required keys:
'node_name': ssh['node_name'],
+ 'minion_id': ssh['minion_id'],
'host': ssh['host'],
'login': ssh['login'],
'password': ssh['password'],
@@ -147,7 +148,7 @@
return keys
def __ssh_data(self, node_name=None, host=None, address_pool=None,
- node_role=None):
+ node_role=None, minion_id=None):
ssh_data = None
@@ -175,6 +176,16 @@
break
else:
ssh_data = ssh
+ elif minion_id is not None:
+ for ssh in self.config_ssh:
+ if minion_id == ssh['minion_id']:
+ if address_pool is not None:
+ if address_pool == ssh['address_pool']:
+ ssh_data = ssh
+ break
+ else:
+ ssh_data = ssh
+
if ssh_data is None:
LOG.debug("config_ssh - {}".format(self.config_ssh))
raise Exception('Auth data for node was not found using '
@@ -191,41 +202,14 @@
names.append(ssh['node_name'])
return names
- def enable_lvm(self, lvmconfig):
- """Method for enabling lvm oh hosts in environment
+ def minion_ids(self):
+ """Get list of minion ids registered in config.underlay.ssh"""
- :param lvmconfig: dict with ids or device' names of lvm storage
- :raises: devops.error.DevopsCalledProcessError,
- devops.error.TimeoutError, AssertionError, ValueError
- """
- def get_actions(lvm_id):
- return [
- "systemctl enable lvm2-lvmetad.service",
- "systemctl enable lvm2-lvmetad.socket",
- "systemctl start lvm2-lvmetad.service",
- "systemctl start lvm2-lvmetad.socket",
- "pvcreate {} && pvs".format(lvm_id),
- "vgcreate default {} && vgs".format(lvm_id),
- "lvcreate -L 1G -T default/pool && lvs",
- ]
- lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"]
- for node_name in self.node_names():
- lvm = lvmconfig.get(node_name, None)
- if not lvm:
- continue
- if 'id' in lvm:
- lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])
- elif 'device' in lvm:
- lvmdevice = '/dev/{}'.format(lvm['device'])
- else:
- raise ValueError("Unknown LVM device type")
- if lvmdevice:
- self.apt_install_package(
- packages=lvmpackages, node_name=node_name, verbose=True)
- for command in get_actions(lvmdevice):
- self.sudo_check_call(command, node_name=node_name,
- verbose=True)
- self.config_lvm = dict(lvmconfig)
+ ids = [] # List is used to keep the original order of ids
+ for ssh in self.config_ssh:
+ if ssh['minion_id'] not in ids:
+ ids.append(ssh['minion_id'])
+ return ids
def host_by_node_name(self, node_name, address_pool=None):
ssh_data = self.__ssh_data(node_name=node_name,
@@ -237,6 +221,11 @@
address_pool=address_pool)
return ssh_data['host']
+ def host_by_minion_id(self, minion_id, address_pool=None):
+ ssh_data = self.__ssh_data(minion_id=minion_id,
+ address_pool=address_pool)
+ return ssh_data['host']
+
def remote(self, node_name=None, host=None, address_pool=None,
username=None):
"""Get SSHClient by a node name or hostname.
@@ -438,12 +427,12 @@
"docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
"docker service ls > "
" /root/\$(hostname -f)/dump_docker_services_ls.txt;"
- "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
+ "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
" do docker service ps --no-trunc 2>&1 \$SERVICE >> "
" /root/\$(hostname -f)/dump_docker_service_ps.txt;"
" done;"
- "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
- " do docker service logs 2>&1 \$SERVICE > "
+ "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ " do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
" /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
" done;"
"vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 83461ab..54c2cdd 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -5,11 +5,11 @@
paramiko
six
requests>=2.2.0
-oslo.config>=6.2.1 # Apache-2.0
+oslo.config>=6.2.1,<6.6.0 # Apache-2.0
pytest>=2.9,<=3.2.5
docker-py
docker-compose==1.7.1
-urllib3
+urllib3==1.23
junit-xml
jinja2>=2.9
jira
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 0d79cc4..fca6a6d 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -77,7 +77,7 @@
'docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest') # noqa
TEMPEST_IMAGE_VERSION = os.environ.get('TEMPEST_IMAGE_VERSION', 'pike')
TEMPEST_PATTERN = os.environ.get('TEMPEST_PATTERN', 'tempest')
-TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 5))
+TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 6))
TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
SALT_VERSION = os.environ.get('SALT_VERSION', '2017.7')
@@ -85,3 +85,6 @@
SL_TEST_REPO = os.environ.get('SL_TEST_REPO',
'https://github.com/Mirantis/stacklight-pytest')
SL_TEST_COMMIT = os.environ.get('SL_TEST_COMMIT', 'master')
+
+EXTERNAL_ADDRESS_POOL_NAME = os.environ.get('EXTERNAL_ADDRESS_POOL_NAME',
+ 'external-pool01')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 037dbd8..0a447d6 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -53,6 +53,9 @@
_default_openstack_steps = pkg_resources.resource_filename(
__name__, 'templates/{0}/openstack.yaml'.format(
settings.LAB_CONFIG_NAME))
+_default_openstack_resources_steps = pkg_resources.resource_filename(
+ __name__, 'templates/{0}/post_openstack.yaml'.format(
+ settings.LAB_CONFIG_NAME))
_default_opencontrail_prepare_tests_steps_path = \
pkg_resources.resource_filename(
__name__, 'templates/{0}/opencontrail.yaml'.format(
@@ -93,6 +96,7 @@
ct.Cfg('ssh', ct.JSONList(),
help="""SSH Settings for Underlay: [{
'node_name': node1,
+ 'minion_id': node1.local,
'roles': ['salt-master', 'salt-minion', ],
'host': hostname,
'login': login,
@@ -115,14 +119,28 @@
ct.Cfg('upstream_dns_servers', ct.JSONList(),
help="IP addresses of upstream DNS servers (dnsmasq)",
default=[]),
- ct.Cfg('lvm', ct.JSONDict(),
- help="LVM settings for Underlay", default={}),
ct.Cfg('address_pools', ct.JSONDict(),
help="""Address pools (dynamically) allocated for the environment.
May be used to determine CIDR for a specific network from
tests or during the deployment process.
{'pool_name1': '<cidr>', 'pool_name2': '<cidr>', ...}""",
default={}),
+ ct.Cfg('dhcp_ranges', ct.JSONDict(),
+ help="""DHCP ranges allocated for the address pools.
+ This is an extended object compared to 'address_pools'.
+ May be used to determine DHCP range start/end/gateway for a
+ specific network from tests or during the deployment
+ process.
+ {'pool_name1': {'cidr': 'n.n.n.n/m',
+ 'start': 'x.x.x.x',
+ 'end': 'y.y.y.y',
+ 'gateway': 'z.z.z.z'},
+ 'pool_name2': {'cidr': 'n.n.n.n/m',
+ 'start': x.x.x.x,
+ 'end': 'y.y.y.y',
+ 'gateway': 'z.z.z.z'},
+ ...}""",
+ default={}),
ct.Cfg('ssh_keys', ct.JSONList(), default=[],
help="SSH key pair(s) for root. If the option is left empty, "
"then a key pair will be generated automatically"),
@@ -203,6 +221,9 @@
ct.Cfg('openstack_steps_path', ct.String(),
help="Path to YAML with steps to deploy openstack",
default=_default_openstack_steps),
+ ct.Cfg('openstack_resources_steps_path', ct.String(),
+ help="Path to YAML with steps to deploy openstack",
+ default=_default_openstack_resources_steps),
ct.Cfg('horizon_host', ct.IPAddress(),
help="", default='0.0.0.0'),
ct.Cfg('horizon_port', ct.String(),
@@ -305,10 +326,10 @@
default='sbPfel23ZigJF3Bm'),
ct.Cfg('kubernetes_docker_package', ct.String(), default=''),
ct.Cfg('kubernetes_hyperkube_image', ct.String(),
- default='{}/mirantis/kubernetes/hyperkube-amd64:v1.10.4-4'.format(
+ default='{}/mirantis/kubernetes/hyperkube-amd64:v1.11.3-2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_pause_image', ct.String(),
- default='{}/mirantis/kubernetes/pause-amd64:v1.10.4-4'.format(
+ default='{}/mirantis/kubernetes/pause-amd64:v1.11.3-2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_image', ct.String(),
default='{}/mirantis/projectcalico/calico/node:v3.1.3'.format(
@@ -335,19 +356,22 @@
ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_virtlet_image', ct.String(),
- help="", default='mirantis/virtlet:v1.1.0'),
+ help="", default='mirantis/virtlet:v1.4.1'),
ct.Cfg('kubernetes_dns', ct.Boolean(),
help="", default=True),
ct.Cfg('kubernetes_externaldns_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_externaldns_image', ct.String(),
- help="", default='mirantis/external-dns:latest'),
+ help="", default='{}/mirantis/external-dns/external-dns:'
+ 'v0.5.6-2'.format(settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_externaldns_provider', ct.String(),
help="", default='coredns'),
ct.Cfg('kubernetes_coredns_enabled', ct.Boolean(),
- help="", default=False),
+ help="", default=True),
ct.Cfg('kubernetes_metallb_enabled', ct.Boolean(),
help="", default=False),
+ ct.Cfg('kubernetes_ingressnginx_enabled', ct.Boolean(),
+ help="", default=False),
ct.Cfg('kubelet_fail_on_swap', ct.Boolean(),
help="", default=False)
]
@@ -365,9 +389,9 @@
default=False),
ct.Cfg('k8s_conformance_image', ct.String(),
default='docker-prod-virtual.docker.mirantis.net/mirantis/'
- 'kubernetes/k8s-conformance:v1.10.4-4'),
+ 'kubernetes/k8s-conformance:v1.11.3-2'),
ct.Cfg('k8s_update_chain', ct.String(),
- default='v1.9.8-4 v1.10.4-4')
+ default='v1.9.8-4 v1.10.4-4 v1.11.3-2')
]
day1_cfg_config_opts = [
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
index 0aebf89..fc35f88 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
@@ -1,118 +1,19 @@
{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
index 737e8e0..65cd68b 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
@@ -7,7 +7,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
index e053de3..02cf300 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
@@ -43,7 +43,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -165,7 +165,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
index 57fbb49..51dfc5d 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
@@ -6,7 +6,7 @@
{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-maas') %}
@@ -42,9 +42,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
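Both `reclass-tools` calls are repointed because the cluster model now keeps the config node class in `infra/config/init.yml` instead of a flat `infra/config.yml`. A hypothetical follow-up step — not part of this change — that would confirm the merged key landed in the new location:

```
# Assumed verification step: grep the rendered class file on cfg01.
- description: Verify that openstack_compute_multi was merged into the new path
  cmd: |
    set -e;
    grep 'system.reclass.storage.system.openstack_compute_multi' \
      /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
```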
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
index 1e318a5..6617855 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
@@ -52,6 +52,13 @@
- export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
- ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
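The added cloud-init commands bootstrap the Salt services on first boot, which assumes the cfg01 day01 image already ships the salt-master and salt-minion packages. In isolated cloud-config form the sequence reads (a sketch; surrounding template keys abbreviated):

```
#cloud-config
runcmd:
  - mkdir -p /srv/salt/reclass/nodes            # location for generated node definitions
  - systemctl enable salt-master                # survive reboots
  - systemctl enable salt-minion
  - systemctl start salt-master
  - systemctl start salt-minion
  # Block for up to 120 s until the local minion responds, so later
  # provisioning steps do not race the service start:
  - salt-call -l info --timeout=120 test.ping
```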
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
index 129693d..64f01fa 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
@@ -1,118 +1,19 @@
{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
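`MACRO_CHECK_VIP()` stands in for the inline VIP check removed above: it verifies that the keepalived-managed control VIP is actually bound on one of the cluster nodes before the deployment proceeds. A sketch modeled on the removed step (the real macro body in `shared-core.yaml` may differ):

```
{# Sketch only, based on the removed inline check. #}
{% macro MACRO_CHECK_VIP() %}
- description: Check the VIP
  cmd: |
    OPENSTACK_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:openstack_control_address);
    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false
{% endmacro %}
```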
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml
index 47c05e2..6e9e2c2 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml
@@ -7,7 +7,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
index 300039a..0b00542 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
@@ -43,7 +43,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -170,7 +170,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
index 8f84a3a..31e9736 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
@@ -6,7 +6,7 @@
{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-nfv-maas') %}
@@ -42,9 +42,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
index 1e318a5..6617855 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
@@ -52,6 +52,13 @@
- export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
- ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
index 2675136..34c254d 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
@@ -3,124 +3,20 @@
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
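The Galera macro replaces three removed steps whose ordering matters: bootstrap the master first, then bring in the replicas one node at a time (`-b 1`) so each can complete its state transfer, then inspect the `wsrep` cluster status. A sketch modeled on those removed steps (the real macro may differ):

```
{# Sketch only, based on the removed inline steps. #}
{% macro MACRO_INSTALL_GALERA() %}
- description: Install Galera on first server
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:master' state.sls galera
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Install Galera on other servers, one node at a time
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:slave' state.sls galera -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Check mysql status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
{% endmacro %}
```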
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh b/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh
deleted file mode 100644
index 0fc5723..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash -xe
-
-export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"172.16.49.66"}
-export SALT_MASTER_MINION_ID=${SALT_MASTER_MINION_ID:-"cfg01.cookied-bm-contrail40-nfv.local"}
-export DEPLOY_NETWORK_GW=${DEPLOY_NETWORK_GW:-"172.16.49.65"}
-export DEPLOY_NETWORK_NETMASK=${DEPLOY_NETWORK_NETMASK:-"255.255.255.192"}
-export DNS_SERVERS=${DNS_SERVERS:-"172.18.208.44"}
-export http_proxy=${http_proxy:-""}
-export https_proxy=${https_proxy:-""}
-export PIPELINES_FROM_ISO=${PIPELINES_FROM_ISO:-"false"}
-export PIPELINE_REPO_URL=${PIPELINE_REPO_URL:-"https://github.com/Mirantis"}
-export MCP_VERSION=${MCP_VERSION:-"proposed"}
-export MCP_SALT_REPO_KEY=${MCP_SALT_REPO_KEY:-"http://apt.mirantis.com/public.gpg"}
-export MCP_SALT_REPO_URL=${MCP_SALT_REPO_URL:-"http://apt.mirantis.com/xenial"}
-export MCP_SALT_REPO="deb [arch=amd64] $MCP_SALT_REPO_URL $MCP_VERSION salt"
-export FORMULAS="salt-formula-*"
-# Not available in 2018.4 and earlier.
-export LOCAL_REPOS=false
-# For cloning from the aptly image, use port 8088:
-#export PIPELINE_REPO_URL=http://172.16.47.182:8088
-
-function _apt_cfg(){
- # TODO: remove this function after the 2018.4 release
- echo "Acquire::CompressionTypes::Order gz;" >/etc/apt/apt.conf.d/99compression-workaround-salt
- echo "Acquire::EnableSrvRecords false;" >/etc/apt/apt.conf.d/99enablesrvrecords-false
- echo "Acquire::http::Pipeline-Depth 0;" > /etc/apt/apt.conf.d/99aws-s3-mirrors-workaround-salt
- echo "APT::Install-Recommends false;" > /etc/apt/apt.conf.d/99dont_install_recommends-salt
- echo "APT::Install-Suggests false;" > /etc/apt/apt.conf.d/99dont_install_suggests-salt
- echo "Acquire::Languages none;" > /etc/apt/apt.conf.d/99dont_acquire_all_languages-salt
- echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
- echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
- echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
- echo "INFO: cleaning sources lists"
- rm -rv /etc/apt/sources.list.d/* || true
- echo > /etc/apt/sources.list || true
-}
-
-function _post_maas_cfg(){
- local PROFILE=mirantis
- # TODO: remove this check and use only the new version after the 2018.4 release
- if [[ -f /var/lib/maas/.maas_login.sh ]]; then
- /var/lib/maas/.maas_login.sh
- else
- echo "WARNING: Attempt to use old maas login schema.."
- TOKEN=$(cat /var/lib/maas/.maas_credentials);
- maas list | cut -d' ' -f1 | xargs -I{} maas logout {}
- maas login $PROFILE http://127.0.0.1:5240/MAAS/api/2.0/ "${TOKEN}"
- fi
- # disable backports for maas enlist pkg repo
- maas ${PROFILE} package-repository update 1 "disabled_pockets=backports"
- maas ${PROFILE} package-repository update 1 "arches=amd64"
- # Download ubuntu image from MAAS local mirror
- if [[ "$LOCAL_REPOS" == "true" ]] ; then
- maas ${PROFILE} boot-source-selections create 2 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
- echo "WARNING: Removing default MAAS stream:"
- maas ${PROFILE} boot-source read 1
- maas ${PROFILE} boot-source delete 1
- maas ${PROFILE} boot-resources import
- # TODO: wait for the import to finish, then stop it.
- else
- maas ${PROFILE} boot-source-selections create 1 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
- maas ${PROFILE} boot-resources import
- fi
- while [ ! -d /var/lib/maas/boot-resources/current/ubuntu/amd64/generic/xenial ]
- do
- sleep 10
- echo "WARNING: Image is still not ready"
- done
-}
-
-### Body
-echo "Preparing metadata model"
-mount /dev/cdrom /mnt/
-cp -rT /mnt/model/model /srv/salt/reclass
-chown -R root:root /srv/salt/reclass/*
-chown -R root:root /srv/salt/reclass/.git* || true
-chmod -R 644 /srv/salt/reclass/classes/cluster/* || true
-chmod -R 644 /srv/salt/reclass/classes/system/* || true
-
-echo "Configuring salt"
-#service salt-master restart
-envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
-service salt-minion restart
-while true; do
- salt-key | grep "$SALT_MASTER_MINION_ID" && break
- sleep 5
-done
-sleep 5
-for i in $(salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"); do
- salt-key -d $i -y
-done
-
-find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
-
-echo "updating git repos"
-if [[ "$PIPELINES_FROM_ISO" == "true" ]] ; then
- cp -r /mnt/mk-pipelines/* /home/repo/mk/mk-pipelines/
- cp -r /mnt/pipeline-library/* /home/repo/mcp-ci/pipeline-library/
- umount /dev/cdrom || true
- chown -R git:www-data /home/repo/mk/mk-pipelines/*
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-else
- umount /dev/cdrom || true
- git clone --mirror "${PIPELINE_REPO_URL}/mk-pipelines.git" /home/repo/mk/mk-pipelines/
- git clone --mirror "${PIPELINE_REPO_URL}/pipeline-library.git" /home/repo/mcp-ci/pipeline-library/
- chown -R git:www-data /home/repo/mk/mk-pipelines/*
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-fi
-
-echo "installing formulas"
-_apt_cfg
-curl -s $MCP_SALT_REPO_KEY | sudo apt-key add -
-echo $MCP_SALT_REPO > /etc/apt/sources.list.d/mcp_salt.list
-apt-get update
-apt-get install -y $FORMULAS
-rm -rf /srv/salt/reclass/classes/service/*
-cd /srv/salt/reclass/classes/service/;ls /usr/share/salt-formulas/reclass/service/ -1 | xargs -I{} ln -s /usr/share/salt-formulas/reclass/service/{};cd /root
-
-salt-call saltutil.refresh_pillar
-salt-call saltutil.sync_all
-if ! $(reclass -n ${SALT_MASTER_MINION_ID} > /dev/null ) ; then
- echo "ERROR: Reclass render failed!"
- exit 1
-fi
-
-salt-call state.sls linux.network,linux,openssh,salt
-salt-call -t5 pkg.install salt-master,salt-minion
-sleep 5
-salt-call state.sls salt
-# Sometimes, maas can get stuck :(
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-salt-call state.sls reclass,ntp
-
-_post_maas_cfg
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-
-ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true
-
-pillar=$(salt-call pillar.data jenkins:client)
-
-if [[ $pillar == *"job"* ]]; then
- salt-call state.sls jenkins.client
-fi
-
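With the cfg01 node now bootstrapped through cloud-init and the day01 image, the whole `master_config.sh` script is retired. One sanity check worth noting from it: the script refused to continue when reclass could not render the master node. A hypothetical deploy step preserving that check — where (or whether) this logic now lives is not shown in this diff:

```
- description: Verify that reclass can render the Salt master node
  cmd: |
    set -e;
    reclass -n cfg01.cookied-bm-contrail40-nfv.local > /dev/null
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
```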
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
index fb217b2..6c838cb 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
@@ -10,13 +10,20 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Workaround - install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
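Cinder is now installed with `INSTALL_VOLUME=false`, and the volume service is applied afterwards as an explicit workaround step against `I@cinder:volume`. A hypothetical follow-up check — not part of this change — to confirm the service actually came up on the volume nodes:

```
- description: Check cinder-volume service status on volume nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:volume' service.status cinder-volume
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```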
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
similarity index 97%
rename from tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
rename to tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
index a9e33af..50da2d4 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
@@ -30,6 +30,7 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
bmk_enabled: 'False'
ceph_enabled: 'False'
+ opencontrail_compute_iface: enp5s0f0
openstack_nfv_dpdk_enabled: 'True'
openstack_nfv_sriov_enabled: 'True'
openstack_nfv_sriov_network: physnet1
@@ -91,7 +92,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -153,7 +154,6 @@
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
openssh_groups: ''
openstack_benchmark_node01_address: 10.167.8.95
openstack_benchmark_node01_hostname: bmk01
@@ -190,9 +190,6 @@
openstack_network_engine: opencontrail
openstack_neutron_bgp_vpn: 'False'
openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
openstack_nova_compute_reserved_host_memory_mb: '900'
openstack_proxy_address: 10.167.8.80
openstack_proxy_hostname: prx
@@ -201,7 +198,7 @@
openstack_proxy_node02_address: 10.167.8.82
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: pike
+ openstack_version: ocata
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -216,7 +213,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
index 1e06917..b130ae8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
@@ -92,7 +92,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -213,7 +213,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index 67d7c0c..e59fdf8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -2,13 +2,13 @@
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
# Name of the context file used to render the Environment model (the .yaml extension is fixed)
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40-nfv') %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
@@ -32,9 +32,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -43,8 +43,8 @@
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
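Two environment-driven changes land in this salt.yaml: the cookiecutter context becomes selectable through `CLUSTER_CONTEXT_NAME` (which is what lets a job pick the renamed `salt-context-cookiecutter-contrail-ocata.yaml`), and the control-plane VM images move to `images.mcp.mirantis.net`. A usage sketch for the former; the job-side variable setting is hypothetical:

```
{# In salt.yaml the context file is now resolved at render time: #}
{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}

{# A CI job can select the Ocata context without touching the template by
   exporting, before rendering (hypothetical job setting):
     CLUSTER_CONTEXT_NAME=salt-context-cookiecutter-contrail-ocata.yaml #}
```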
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
index 6c9e48f..cc69c64 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
@@ -47,37 +47,13 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-contrail40/core.yaml b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
index c815d86..21ab849 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
@@ -3,124 +3,20 @@
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/master_config.sh b/tcp_tests/templates/cookied-bm-contrail40/master_config.sh
deleted file mode 100644
index a250862..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/master_config.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash -xe
-
-export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"172.16.49.66"}
-export SALT_MASTER_MINION_ID=${SALT_MASTER_MINION_ID:-"cfg01.cookied-bm-contrail40.local"}
-export DEPLOY_NETWORK_GW=${DEPLOY_NETWORK_GW:-"172.16.49.65"}
-export DEPLOY_NETWORK_NETMASK=${DEPLOY_NETWORK_NETMASK:-"255.255.255.192"}
-export DNS_SERVERS=${DNS_SERVERS:-"172.18.208.44"}
-export http_proxy=${http_proxy:-""}
-export https_proxy=${https_proxy:-""}
-export PIPELINES_FROM_ISO=${PIPELINES_FROM_ISO:-"false"}
-export PIPELINE_REPO_URL=${PIPELINE_REPO_URL:-"https://github.com/Mirantis"}
-export MCP_VERSION=${MCP_VERSION:-"proposed"}
-export MCP_SALT_REPO_KEY=${MCP_SALT_REPO_KEY:-"http://apt.mirantis.com/public.gpg"}
-export MCP_SALT_REPO_URL=${MCP_SALT_REPO_URL:-"http://apt.mirantis.com/xenial"}
-export MCP_SALT_REPO="deb [arch=amd64] $MCP_SALT_REPO_URL $MCP_VERSION salt"
-export FORMULAS="salt-formula-*"
-# Not available in 2018.4 and earlier.
-export LOCAL_REPOS=false
-# For cloning from the aptly image, use port 8088:
-#export PIPELINE_REPO_URL=http://172.16.47.182:8088
-
-function _apt_cfg(){
- # TODO: remove this function after the 2018.4 release
- echo "Acquire::CompressionTypes::Order gz;" >/etc/apt/apt.conf.d/99compression-workaround-salt
- echo "Acquire::EnableSrvRecords false;" >/etc/apt/apt.conf.d/99enablesrvrecords-false
- echo "Acquire::http::Pipeline-Depth 0;" > /etc/apt/apt.conf.d/99aws-s3-mirrors-workaround-salt
- echo "APT::Install-Recommends false;" > /etc/apt/apt.conf.d/99dont_install_recommends-salt
- echo "APT::Install-Suggests false;" > /etc/apt/apt.conf.d/99dont_install_suggests-salt
- echo "Acquire::Languages none;" > /etc/apt/apt.conf.d/99dont_acquire_all_languages-salt
- echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
- echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
- echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
- echo "INFO: cleaning sources lists"
- rm -rv /etc/apt/sources.list.d/* || true
- echo > /etc/apt/sources.list || true
-}
-
-function _post_maas_cfg(){
- local PROFILE=mirantis
- # TODO: remove this check and use only the new version after the 2018.4 release
- if [[ -f /var/lib/maas/.maas_login.sh ]]; then
- /var/lib/maas/.maas_login.sh
- else
- echo "WARNING: Attempt to use old maas login schema.."
- TOKEN=$(cat /var/lib/maas/.maas_credentials);
- maas list | cut -d' ' -f1 | xargs -I{} maas logout {}
- maas login $PROFILE http://127.0.0.1:5240/MAAS/api/2.0/ "${TOKEN}"
- fi
- # disable backports for maas enlist pkg repo
- maas ${PROFILE} package-repository update 1 "disabled_pockets=backports"
- maas ${PROFILE} package-repository update 1 "arches=amd64"
- # Download ubuntu image from MAAS local mirror
- if [[ "$LOCAL_REPOS" == "true" ]] ; then
- maas ${PROFILE} boot-source-selections create 2 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
- echo "WARNING: Removing default MAAS stream:"
- maas ${PROFILE} boot-source read 1
- maas ${PROFILE} boot-source delete 1
- maas ${PROFILE} boot-resources import
- # TODO: wait for the import to finish, then stop it.
- else
- maas ${PROFILE} boot-source-selections create 1 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
- maas ${PROFILE} boot-resources import
- fi
- while [ ! -d /var/lib/maas/boot-resources/current/ubuntu/amd64/generic/xenial ]
- do
- sleep 10
- echo "WARNING: Image is still not ready"
- done
-}
-
-### Body
-echo "Preparing metadata model"
-mount /dev/cdrom /mnt/
-cp -rT /mnt/model/model /srv/salt/reclass
-chown -R root:root /srv/salt/reclass/*
-chown -R root:root /srv/salt/reclass/.git* || true
-chmod -R 644 /srv/salt/reclass/classes/cluster/* || true
-chmod -R 644 /srv/salt/reclass/classes/system/* || true
-
-echo "Configuring salt"
-#service salt-master restart
-envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
-service salt-minion restart
-while true; do
- salt-key | grep "$SALT_MASTER_MINION_ID" && break
- sleep 5
-done
-sleep 5
-for i in $(salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"); do
- salt-key -d $i -y
-done
-
-find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
-
-echo "updating git repos"
-if [[ "$PIPELINES_FROM_ISO" == "true" ]] ; then
- cp -r /mnt/mk-pipelines/* /home/repo/mk/mk-pipelines/
- cp -r /mnt/pipeline-library/* /home/repo/mcp-ci/pipeline-library/
- umount /dev/cdrom || true
- chown -R git:www-data /home/repo/mk/mk-pipelines/*
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-else
- umount /dev/cdrom || true
- git clone --mirror "${PIPELINE_REPO_URL}/mk-pipelines.git" /home/repo/mk/mk-pipelines/
- git clone --mirror "${PIPELINE_REPO_URL}/pipeline-library.git" /home/repo/mcp-ci/pipeline-library/
- chown -R git:www-data /home/repo/mk/mk-pipelines/*
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-fi
-
-echo "installing formulas"
-_apt_cfg
-curl -s $MCP_SALT_REPO_KEY | sudo apt-key add -
-echo $MCP_SALT_REPO > /etc/apt/sources.list.d/mcp_salt.list
-apt-get update
-apt-get install -y $FORMULAS
-rm -rf /srv/salt/reclass/classes/service/*
-cd /srv/salt/reclass/classes/service/;ls /usr/share/salt-formulas/reclass/service/ -1 | xargs -I{} ln -s /usr/share/salt-formulas/reclass/service/{};cd /root
-
-salt-call saltutil.refresh_pillar
-salt-call saltutil.sync_all
-if ! $(reclass -n ${SALT_MASTER_MINION_ID} > /dev/null ) ; then
- echo "ERROR: Reclass render failed!"
- exit 1
-fi
-
-salt-call state.sls linux.network,linux,openssh,salt
-salt-call -t5 pkg.install salt-master,salt-minion
-sleep 5
-salt-call state.sls salt
-# Sometimes, maas can get stuck :(
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-salt-call state.sls reclass,ntp
-
-_post_maas_cfg
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-
-ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true
-
-pillar=$(salt-call pillar.data jenkins:client)
-
-if [[ $pillar == *"job"* ]]; then
- salt-call state.sls jenkins.client
-fi
-
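The second copy of `master_config.sh` (for `cookied-bm-contrail40`) goes away as well. Its `_post_maas_cfg` helper polled MAAS until the xenial boot image finished importing; if that wait is still needed somewhere in the new flow, it could be expressed as a deploy step along these lines (hypothetical sketch, adapted directly from the deleted loop):

```
- description: Wait until the xenial boot resources are imported into MAAS
  cmd: |
    while [ ! -d /var/lib/maas/boot-resources/current/ubuntu/amd64/generic/xenial ]; do
      sleep 10;
      echo "WARNING: Image is still not ready";
    done
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
```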
diff --git a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
index 1bd831a..ec83dc7 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
@@ -10,13 +10,20 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Workaround - install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
similarity index 95%
copy from tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
copy to tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
index a9e33af..db9b61b 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
@@ -30,15 +30,6 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
bmk_enabled: 'False'
ceph_enabled: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
-
cicd_control_node01_address: 10.167.8.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.8.92
@@ -91,7 +82,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -201,7 +192,7 @@
openstack_proxy_node02_address: 10.167.8.82
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: pike
+ openstack_version: ocata
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -216,7 +207,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
@@ -226,7 +217,7 @@
stacklight_log_node02_hostname: log02
stacklight_log_node03_address: 10.167.8.63
stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
+ stacklight_long_term_storage_type: prometheus
stacklight_monitor_address: 10.167.8.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.8.71
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
index 1125a8f..4a9dc13 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
@@ -82,7 +82,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -207,7 +207,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index e9f0e87..3542e9b 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -2,13 +2,13 @@
{% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
# Name of the context file used to render the Environment model (the .yaml extension is fixed)
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40') %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
@@ -32,9 +32,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -43,8 +43,8 @@
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
index 6c9e48f..6b6ec9f 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
@@ -47,37 +47,14 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
index 530a4e7..55d6d8d 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
@@ -1,117 +1,17 @@
{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
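Each removed block above maps 1:1 to a macro call: keepalived, glusterfs, rabbitmq, galera, haproxy, memcached and the VIP check now come from shared-core.yaml, so every template uses one maintained implementation instead of its own copy. A sketch of what such a macro looks like, reconstructed from the removed keepalived steps (the authoritative body lives in shared-core.yaml and may differ):

```
{%- macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on the first cluster node
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived on the remaining cluster nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{%- endmacro %}
```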
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 0d9e84c..ce13598 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -79,7 +79,7 @@
control_vlan: '2416'
cookiecutter_template_branch: proposed
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.62
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.0/26
@@ -116,10 +116,13 @@
openstack_benchmark_node01_address: 10.167.11.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '3'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.11
- openstack_compute_rack01_tenant_subnet: 10.167.12
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 10.167.11.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.11.11
@@ -190,7 +193,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.2
shared_reclass_branch: proposed
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
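Dropping openstack_compute_count to '2' and adding explicit *_address_ranges pins one address per generated compute node instead of deriving addresses from the rack subnet alone. Assuming the generator allocates the ranges in hostname order (an assumption, not stated in this change), the rack expands roughly to:

```
# hypothetical expansion of the rack01 generator with count=2
cmp001:
  single_address: 172.16.10.105   # from openstack_compute_single_address_ranges
  deploy_address: 192.168.10.105  # from openstack_compute_deploy_address_ranges
  tenant_address: 10.1.0.105      # from openstack_compute_tenant_address_ranges
cmp002:
  single_address: 172.16.10.106
  deploy_address: 192.168.10.106
  tenant_address: 10.1.0.106
```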
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
index 3897e91..459ab69 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -39,7 +39,7 @@
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
index 3f4f128..b77550a 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -51,16 +49,13 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
index 612299f..084a922 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
@@ -73,8 +73,9 @@
net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
params:
ip_reserved:
- gateway: +1
- l2_network_device: -2
+ gateway: '172.17.42.129'
+ ip_ranges:
+ dhcp: ['172.17.42.130', '172.17.42.180']
groups:
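The external pool moves from relative addressing, where fuel-devops resolves gateway: +1 against whatever subnet it allocates, to fixed values plus a bounded DHCP range, so the gateway and lease addresses stay stable across runs. Both spellings side by side (the relative form is the one removed above):

```
ip_reserved:
  gateway: +1               # first address of the allocated subnet
  l2_network_device: -2     # second-to-last address

ip_reserved:
  gateway: '172.17.42.129'  # pinned gateway
  ip_ranges:
    dhcp: ['172.17.42.130', '172.17.42.180']   # bounded lease pool
```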
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
similarity index 81%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
index 4ab0f03..2d79d55 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
@@ -1,8 +1,8 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
- description: Sync all
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
new file mode 100644
index 0000000..c505c58
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
@@ -0,0 +1,278 @@
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
+
+{%- macro MACRO_CHECK_SYSTEMCTL() %}
+{#######################################}
+- description: Check systemctl on compute
+ cmd: |
+ set -ex;
+ salt 'cmp*' cmd.run "systemctl --version";
+ salt 'cmp*' cmd.run "journalctl -u dbus";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
+{%- endmacro %}
+
+- description: Install keepalived on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install etcd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@etcd:server' state.sls etcd.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Install certs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@etcd:server' state.sls salt.minion -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+# Install opencontrail database services
+- description: Install opencontrail database services on node01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install opencontrail database services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+# Install opencontrail control services
+- description: Install opencontrail services on node01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install opencontrail services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Create salt minion certificates on Kubernetes master nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install docker host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@docker:host' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+ #- description: Configure OpenContrail as an add-on for Kubernetes
+ # cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ # -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+ # node_name: {{ HOSTNAME_CFG01 }}
+ # retry: {count: 1, delay: 5}
+ # skip_fail: false
+
+- description: Install Kubernetes components
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls kubernetes.pool
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: "Run k8s master at *01* to simplify namespaces creation"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master and *01*' state.sls kubernetes.master exclude=kubernetes.master.setup,kubernetes.master.kube-addons
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Run k8s without master.setup
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup,kubernetes.master.kube-addons
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Check the etcd health
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ MACRO_CHECK_SYSTEMCTL() }}
+
+- description: Run Kubernetes master setup
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls kubernetes.master.setup
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart Kubelet
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' service.restart 'kubelet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for contrail containers to come up; run the opencontrail.client state only after that
+ cmd: |
+ sleep 30;
+ total_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f2`
+ for i in `seq 1 10`; do
+ ready_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f1`
+ if [ "$ready_pods" == "$total_pods" ];then
+ echo "containers are ready. Going to the next step"
+ break
+ elif [ "$i" -ne "10" ]; then
+ echo "Opencontrail containers is not ready. $ready_pods from $total_pods is ready."
+ sleep 60
+ continue
+ else
+ echo "Failed to up contrail containers in 10 minutes"
+ exit 1
+ fi
+ done
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check all pods
+ cmd: |
+ salt 'ctl*' cmd.run "kubectl -o wide get pods --all-namespaces";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install contrail computes
+- description: Set up the OpenContrail resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 60}
+ skip_fail: false
+
+- description: Apply opencontrail (excluding the client) on contrail computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Reboot contrail computes
+ cmd: |
+ salt --async -C 'I@opencontrail:compute' system.reboot;
+ sleep 450;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Apply opencontrail.client on contrail computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Apply the full opencontrail state on contrail computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on cmp*
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'cmp*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all on contrail computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' saltutil.sync_all
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create salt minion certificates on Kubernetes master nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install Kubernetes components
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:pool and not I@kubernetes:master' state.sls kubernetes.pool
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart Kubelet
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:pool and not I@kubernetes:master' service.restart 'kubelet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure OpenContrail as an add-on for Kubernetes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Renew hosts file on the whole cluster
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Final check all pods
+ cmd: |
+ sleep 60;
+ salt 'ctl*' cmd.run "kubectl -o wide get pods --all-namespaces";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check contrail status on all pods
+ cmd: |
+ pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $2}'`
+ for i in $pods; do
+ kubectl exec $i -c opencontrail-controller -n kube-system contrail-status;
+ kubectl exec $i -c opencontrail-analytics -n kube-system contrail-status;
+ kubectl exec $i -c opencontrail-analyticsdb -n kube-system contrail-status;
+ done
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
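One caveat in the wait loop above: total_pods is sampled once before the loop, so opencontrail pods scheduled after the first kubectl call are never counted. A self-contained variant that recomputes both counters on every pass, kept in the same step format (a hardening sketch, not part of this change):

```
- description: Wait for opencontrail pods, recounting totals on every pass
  cmd: |
    for i in `seq 1 10`; do
      # READY column looks like "2/3"; split it into ready and total counts
      total_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f2`
      ready_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f1`
      if [ "$ready_pods" == "$total_pods" ]; then
        echo "OpenContrail containers are ready"
        break
      elif [ "$i" -ne "10" ]; then
        echo "$ready_pods of $total_pods containers ready, waiting..."
        sleep 60
      else
        echo "Contrail containers failed to come up within 10 minutes"
        exit 1
      fi
    done
  node_name: {{ HOSTNAME_CTL01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```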
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
new file mode 100644
index 0000000..ad4e04a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -0,0 +1,119 @@
+nodes:
+ cfg01.bm-k8s-contrail.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ # Physical nodes
+
+ kvm01.bm-k8s-contrail.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: single_vlan_ctl
+
+ kvm02.bm-k8s-contrail.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: single_vlan_ctl
+
+ kvm03.bm-k8s-contrail.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: single_vlan_ctl
+
+ ctl01.bm-k8s-contrail.local:
+ reclass_storage_name: kubernetes_control_node01
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.17.41.9
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.239
+
+ ctl02.bm-k8s-contrail.local:
+ reclass_storage_name: kubernetes_control_node02
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.17.41.10
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.238
+
+ ctl03.bm-k8s-contrail.local:
+ reclass_storage_name: kubernetes_control_node03
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.17.41.11
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.237
+
+ cmp<<count>>:
+ reclass_storage_name: kubernetes_compute_rack01
+ roles:
+ - kubernetes_compute_contrail
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ ens11f1:
+ role: k8s_oc40_only_vhost_on_control_vlan
+ # cmp001.bm-k8s-contrail.local:
+ # reclass_storage_name: kubernetes_compute_node001
+ # roles:
+ # - linux_system_codename_xenial
+ # - kubernetes_compute_contrail
+ # - salt_master_host
+ # interfaces:
+ # enp9s0f0:
+ # role: single_dhcp
+ # ens11f1:
+ # role: k8s_oc40_only_vhost_on_control_vlan
+ # single_address: 10.167.8.103
+ #
+ # cmp002.bm-k8s-contrail.local:
+ # reclass_storage_name: kubernetes_compute_node002
+ # roles:
+ # - linux_system_codename_xenial
+ # - kubernetes_compute_contrail
+ # - salt_master_host
+ # interfaces:
+ # enp9s0f0:
+ # role: single_dhcp
+ # ens11f1:
+ # role: k8s_oc40_only_vhost_on_control_vlan
+ # single_address: 10.167.8.104
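The per-node cmp001/cmp002 definitions are kept above only as comments because the cmp<<count>> entry is a generator: together with kubernetes_compute_count: 2 and the kubernetes_compute_*_address_ranges from the cookiecutter context it should expand into the same two nodes. A hedged sketch of the expected expansion:

```
# hypothetical generator output, matching the commented-out nodes
cmp001.bm-k8s-contrail.local:
  reclass_storage_name: kubernetes_compute_node001
  single_address: 10.167.8.103
cmp002.bm-k8s-contrail.local:
  reclass_storage_name: kubernetes_compute_node002
  single_address: 10.167.8.104
```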
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
similarity index 89%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 88aef93..c8fc345 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -82,10 +82,10 @@
control_vlan: '2410'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.17.41.2
deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
+ deploy_network_subnet: 172.17.41.0/26
deployment_type: physical
dns_server01: 172.17.41.2
dns_server02: 172.17.41.2
@@ -107,9 +107,15 @@
infra_primary_second_nic: eth2
kubernetes_enabled: 'True'
kubernetes_compute_count: 2
- kubernetes_compute_rack01_single_subnet: 10.167.8
- kubernetes_compute_rack01_tenant_subnet: 192.168.0
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_single_address_ranges: 10.167.8.103-10.167.8.104
+ kubernetes_compute_tenant_address_ranges: 10.167.8.103-10.167.8.104
kubernetes_network_opencontrail_enabled: 'True'
+ kubernetes_keepalived_vip_interface: br_ctl
+ kubernetes_metallb_enabled: 'False' # Not used with opencontrail
+ metallb_addresses: 172.17.41.160-172.17.41.180
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
local_repositories: 'False'
maas_deploy_address: 172.16.49.66
maas_deploy_range_end: 10.0.0.254
@@ -131,15 +137,16 @@
kubernetes_control_node03_address: 10.167.8.237
kubernetes_control_node03_hostname: ctl03
linux_repo_contrail_component: oc40
- opencontrail_analytics_address: 10.167.8.30
opencontrail_analytics_hostname: ctl
- opencontrail_analytics_node01_address: 10.167.8.31
opencontrail_analytics_node01_hostname: ctl01
- opencontrail_analytics_node02_address: 10.167.8.32
opencontrail_analytics_node02_hostname: ctl02
- opencontrail_analytics_node03_address: 10.167.8.33
opencontrail_analytics_node03_hostname: ctl03
+ opencontrail_analytics_address: ${_param:opencontrail_control_address}
+ opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
+ opencontrail_analytics_node02_address: ${_param:opencontrail_control_node02_address}
+ opencontrail_analytics_node03_address: ${_param:opencontrail_control_node03_address}
opencontrail_compute_iface_mask: '24'
+ opencontrail_compute_iface: ens11f1
opencontrail_control_address: 10.167.8.236
opencontrail_control_hostname: ctl
opencontrail_control_node01_address: 10.167.8.239
@@ -153,6 +160,7 @@
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
+ opencontrail_public_ip_range: 172.17.41.128/26
opencontrail_version: '4.0'
openstack_enabled: 'False'
openssh_groups: ''
@@ -175,7 +183,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.17.41.3
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
@@ -185,7 +193,6 @@
stacklight_log_node02_hostname: log02
stacklight_log_node03_address: 10.167.8.63
stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
stacklight_monitor_address: 10.167.8.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.8.71
@@ -204,10 +211,10 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 10.167.8.1
tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2411'
+ tenant_network_subnet: 10.167.8.0/24
+ tenant_vlan: '2410'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
vnf_onboarding_enabled: 'False'
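Replacing the literal analytics addresses with ${_param:opencontrail_control_*} references relies on reclass parameter interpolation: the values are resolved when the model is rendered, so analytics always follows the control-plane addresses and the two can no longer drift apart. In effect:

```
opencontrail_control_node01_address: 10.167.8.239
# resolves to 10.167.8.239 at render time:
opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
```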
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
new file mode 100644
index 0000000..206dead
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
@@ -0,0 +1,108 @@
+nodes:
+ mon01.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon02.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon03.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr01.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr02.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr03.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log01.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log02.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log03.bm-k8s-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid01.bm-k8s-contrail.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid02.bm-k8s-contrail.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid03.bm-k8s-contrail.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
similarity index 76%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
index 951075e..274fb44 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
@@ -1,11 +1,11 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
@@ -21,7 +21,7 @@
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN, CLUSTER_PRODUCT_MODELS='cicd infra kubernetes opencontrail stacklight2') }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
@@ -30,22 +30,19 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: "Change path to internal storage for salt.control images"
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Delete proxy inclusion from kvm
+ cmd: |
+ sed -i 's/- system.salt.control.cluster.kubernetes_proxy_cluster//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -58,6 +55,24 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: "Excluding tenant network from cluster"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.opencontrail_compute_address '${_param:single_address}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/compute.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Use correct compute interface"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.opencontrail_compute_iface 'ens11f1.${_param:control_vlan}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: Rerun openssh after env model is generated
cmd: |
salt-call state.sls openssh
@@ -80,7 +95,7 @@
- description: Update minion information
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
node_name: {{ HOSTNAME_CFG01 }}
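The switch from saltutil.sync_grains to saltutil.sync_all widens the refresh: sync_all distributes custom execution modules, states, grains and returners to the minions in one call, while sync_grains only ships grains. If a quick smoke check is wanted after the sync, something like this hypothetical step would do:

```
- description: Smoke-check minions after the full sync (hypothetical step)
  cmd: |
    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
    salt --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
```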
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
similarity index 76%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
index 0b559a8..cb929e4 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
@@ -1,4 +1,5 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
@@ -65,36 +66,31 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install glusterfs client on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
# Install slv2 infra
#Launch containers
- description: Install Mongo if target matches
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure Alerta if it is exists
+- description: Configure the Mongo cluster if target matches
cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
+ retry: {count: 5, delay: 20}
skip_fail: false
- description: Install telegraf
@@ -113,19 +109,31 @@
skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -141,23 +149,12 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
# Install service for the log collection
- description: Configure fluentd
@@ -186,72 +183,85 @@
retry: {count: 1, delay: 10}
skip_fail: false
-# Collect grains needed to configure the services
+ ######################################
+ ######################################
+ ######################################
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' mysql.status
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' mysql.status
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Collect Grains
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
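The elasticsearch and kibana installs above drop the -b 1 batch flag in favour of an explicit two-phase order: run the state on the *01* node first, then on the whole I@...:enabled:true set, so the cluster is bootstrapped on a known node before the rest join. For comparison, the removed single-step form serialized execution with -b 1, one minion at a time but with no fixed starting node:

```
- description: Install elasticsearch server one minion at a time (removed form)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
```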
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
similarity index 82%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
index a73ca23..16bd9f6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -20,6 +20,7 @@
bootcmd:
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
- service sshd restart
output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
@@ -29,6 +30,9 @@
- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
+ # Enable grub menu using updated config below
+ - update-grub
+
# Prepare network connection
- sudo ifdown ens3
- sudo ip r d default || true # remove existing default route to get it from dhcp
@@ -42,10 +46,14 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
- # Enable grub menu using updated config below
- - update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
similarity index 81%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
index da6afea..089f343 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
@@ -1,8 +1,8 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-pike-k8s-contrail') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-k8s-contrail') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
@@ -22,10 +22,10 @@
{% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '172.17.41.9') %}
{% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '172.17.41.10') %}
{% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '172.17.41.11') %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
---
aliases:
@@ -38,7 +38,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-pike-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-bm-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
admin-pool01:
@@ -179,96 +179,6 @@
nodes:
- # - name: {{ HOSTNAME_CFG01 }}
- # role: salt_master
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_cfg01
-
- # interfaces:
- # - label: enp3s0f0 # Infra interface
- # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- # - label: enp3s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- # network_config:
- # enp3s0f0:
- # networks:
- # - infra
- # enp3s0f1:
- # networks:
- # - admin
- # - name: {{ HOSTNAME_PRX01 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_PRX01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data
-
- # interfaces:
- # - label: enp9s0f0
- # l2_network_device: admin
- # mac_address: !os_env ETH0_MAC_ADDRESS_PRX01
- # - label: enp9s0f1
- # mac_address: !os_env ETH1_MAC_ADDRESS_PRX01
-
- # network_config:
- # enp9s0f0:
- # networks:
- # - admin
- # bond0:
- # networks:
- # - control
- # aggregation: active-backup
- # parents:
- # - enp9s0f1
-
- name: {{ HOSTNAME_KVM01 }}
role: salt_minion
params:
@@ -415,7 +325,7 @@
- enp9s0f1
- name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
+ role: k8s_controller
params:
ipmi_user: !os_env IPMI_USER
ipmi_password: !os_env IPMI_PASSWORD
@@ -591,12 +501,6 @@
enp9s0f0:
networks:
- admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
- name: {{ HOSTNAME_CMP002 }}
role: salt_minion
@@ -638,9 +542,3 @@
enp9s0f0:
networks:
- admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
index 6dc4829..a3de973 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
@@ -1,124 +1,11 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
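
The core.yaml rewrite above replaces roughly 120 lines of hand-written Salt steps with calls into shared-core.yaml. The macro bodies themselves are not part of this diff; as a hypothetical sketch, each macro presumably wraps the same step structure the inline version used, inferred here from the removed keepalived steps:

```
{# Hypothetical sketch only: the real macro lives in shared-core.yaml and is
   not shown in this diff; shape inferred from the inline steps removed above. #}
{% macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{% endmacro %}
```

Centralizing the steps this way means a fix to, say, the RabbitMQ bring-up order lands in every template that imports shared-core.yaml, instead of being copy-edited across a dozen core.yaml files.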
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 9e8dd5e..1d8cbbf 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -2,20 +2,13 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
-
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
- description: Install cinder volume
@@ -26,60 +19,7 @@
skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=true) }}
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.0/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.10,end=172.17.42.60 --gateway 172.17.42.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet --allocation-pool start=192.168.0.120,end=192.168.0.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
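
Note the cinder pattern here: the template calls MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) and keeps a single explicit "Install cinder volume" step of its own. Judging from the call sites visible in this diff (INSTALL_VOLUME, INSTALL_GATEWAY, CELL_MAPPING), the shared macros take keyword flags with defaults. A plausible sketch of the cinder macro follows; the 'I@cinder:controller' target and the `-b 1` batching are assumptions, since the real body in shared-openstack.yaml is not shown:

```
{# Assumed shape; only the INSTALL_VOLUME flag is visible from the call sites. #}
{% macro MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) %}
- description: Install cinder controller services
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:controller' state.sls cinder -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- if INSTALL_VOLUME %}
- description: Install cinder volume
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:volume' state.sls cinder
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endif %}
{% endmacro %}
```

Passing INSTALL_VOLUME=false and adding the volume step manually lets this bare-metal template keep its own retry policy for that state.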
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index b0e32ef..7585c41 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,7 +1,7 @@
default_context:
mcp_version: proposed
ceph_enabled: 'False'
- cicd_enabled: 'True'
+ cicd_enabled: 'False'
cicd_control_node01_address: 10.167.4.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.4.92
@@ -23,7 +23,7 @@
control_vlan: '2404'
cookiecutter_template_branch: proposed
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.164.1
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.164.0/26
@@ -60,8 +60,11 @@
openstack_benchmark_node01_hostname: bmk01
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_compute_node01_hostname: cmp01
openstack_compute_node02_hostname: cmp02
openstack_compute_node01_address: 10.167.4.3
@@ -154,7 +157,7 @@
salt_master_address: 10.167.4.2
salt_master_hostname: cfg01
salt_master_management_address: 172.16.164.2
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
stacklight_enabled: 'False'
fluentd_enabled: 'False'
stacklight_log_address: 10.167.4.60
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index def5353..692cf19 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -44,34 +44,29 @@
enp9s0f1:
role: bond0_ab_ovs_vlan_ctl
- cmp01.cookied-bm-mcp-dvr-vxlan.local:
+ cmp001.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_compute_node01
roles:
- openstack_compute
- - features_lvm_backend
+ - features_lvm_backend_volume_sdb
- linux_system_codename_xenial
interfaces:
enp9s0f0:
role: single_mgm_dhcp
enp9s0f1:
role: bond0_ab_dvr_vxlan_ctl_mesh_floating
- single_address: 10.167.4.105
- tenant_address: 10.167.6.105
-
- cmp02.cookied-bm-mcp-dvr-vxlan.local:
+ cmp002.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_compute_node02
roles:
- openstack_compute
- - features_lvm_backend
+ - features_lvm_backend_volume_sdb
- linux_system_codename_xenial
interfaces:
enp9s0f0:
role: single_mgm_dhcp
enp9s0f1:
role: bond0_ab_dvr_vxlan_ctl_mesh_floating
- single_address: 10.167.4.106
- tenant_address: 10.167.6.106
gtw01.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index 37d0b14..6cace03 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -4,6 +4,7 @@
roles:
- openstack_control_leader
- linux_system_codename_xenial
+ - features_lvm_backend_control
interfaces:
ens2:
role: single_dhcp
@@ -15,6 +16,7 @@
roles:
- openstack_control
- linux_system_codename_xenial
+ - features_lvm_backend_control
interfaces:
ens2:
role: single_dhcp
@@ -26,6 +28,7 @@
roles:
- openstack_control
- linux_system_codename_xenial
+ - features_lvm_backend_control
interfaces:
ens2:
role: single_dhcp
@@ -119,57 +122,3 @@
role: single_dhcp
ens3:
role: single_ctl
-
- cid01.cookied-bm-mcp-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# mon01.cookied-bm-mcp-dvr-vxlan.local:
-# reclass_storage_name: stacklight_server_node01
-# roles:
-# - stacklightv2_server_leader
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-#
-# mon02.cookied-bm-mcp-dvr-vxlan.local:
-# reclass_storage_name: stacklight_server_node02
-# roles:
-# - stacklightv2_server
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-#
-# mon03.cookied-bm-mcp-dvr-vxlan.local:
-# reclass_storage_name: stacklight_server_node03
-# roles:
-# - stacklightv2_server
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
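
Dropping cid01–cid03 from the VCP environment model is the counterpart of the cicd_enabled flip in the cookiecutter context earlier in this diff; the two files describe the same cluster and presumably have to agree:

```
# salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml (same change set):
default_context:
  cicd_enabled: 'False'
# With CICD disabled, the generated model presumably defines no
# cicd_control_node0{1..3}, so the cid01..cid03 entries above would be
# orphaned and are removed along with the toggle.
```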
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index dea085e..8804721 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -5,60 +5,26 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM03 with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "WR for changing VCP images path to internal storage"
- cmd: |
- set -e;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary workaround for removing virtual gtw nodes
- cmd: |
- sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Rerun openssh after env model is generated
cmd: |
salt-call state.sls openssh
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
########################################
@@ -129,4 +95,3 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
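
With the VCP-image and cinder/gateway sed workarounds gone, salt.yaml reduces to the shared macro pipeline (plus the template's own "Rerun openssh" step). Condensed from the hunks above, not a verbatim quote of the resulting file, the deployment skeleton is:

```
{% import 'shared-salt.yaml' as SHARED with context %}

{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
```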
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index 3f4f128..b77550a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -51,16 +49,13 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
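
The cfg01 user-data change inverts the bootstrap contract: instead of installing utility packages at first boot, the template now assumes an image with salt-master and salt-minion preinstalled and only has to wire them up. A minimal sketch of the resulting cloud-init payload, trimmed to the new runcmd tail (the surrounding bootcmd/write_files sections are unchanged):

```
#cloud-config
runcmd:
  # Salt is preinstalled in the image; just enable, start and smoke-test it.
  - mkdir -p /srv/salt/reclass/nodes
  - systemctl enable salt-master
  - systemctl enable salt-minion
  - systemctl start salt-master
  - systemctl start salt-minion
  - salt-call -l info --timeout=120 test.ping
```

salt-call test.ping runs against the local minion, so a failure here surfaces at first boot instead of much later in the deploy pipeline.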
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index 25c98bc..8d2bf09 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -6,8 +6,8 @@
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
@@ -15,8 +15,8 @@
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
-{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.164.3') %}
-{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.164.31') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.164.3') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.164.31') %}
{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
@@ -48,8 +48,8 @@
default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
- default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
ip_ranges:
@@ -73,8 +73,9 @@
net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
params:
ip_reserved:
- gateway: +1
- l2_network_device: -2
+ gateway: '172.17.42.1'
+ ip_ranges:
+ dhcp: ['172.17.42.10', '172.17.42.60']
groups:
@@ -310,14 +311,13 @@
parents:
- enp9s0f1
-
- - name: {{ HOSTNAME_CMP01 }}
+ - name: {{ HOSTNAME_CMP001 }}
role: salt_minion
params:
ipmi_user: !os_env IPMI_USER
ipmi_password: !os_env IPMI_PASSWORD
ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP01 # hostname or IP address
+ ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
ipmi_lan_interface: lanplus
ipmi_port: 623
@@ -343,9 +343,9 @@
interfaces:
- label: enp9s0f0
l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
network_config:
enp9s0f0:
networks:
@@ -358,15 +358,13 @@
- enp9s0f0
- enp9s0f1
-
-
- - name: {{ HOSTNAME_CMP02 }}
+ - name: {{ HOSTNAME_CMP002 }}
role: salt_minion
params:
ipmi_user: !os_env IPMI_USER
ipmi_password: !os_env IPMI_PASSWORD
ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP02 # hostname or IP address
+ ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
ipmi_lan_interface: lanplus
ipmi_port: 623
@@ -392,9 +390,9 @@
interfaces:
- label: enp9s0f0
l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
network_config:
enp9s0f0:
networks:
@@ -407,7 +405,6 @@
- enp9s0f0
- enp9s0f1
-
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
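
Two mechanics in this underlay change are worth spelling out. First, every identifier follows the same os_env pattern, so renaming cmp01 to cmp001 really means renaming the override variables; second, the external pool now pins the gateway and DHCP range explicitly instead of deriving them from pool offsets (+1 / -2). The os_env pattern, as used throughout the file:

```
{# Take the value from the environment when set, else build a default from
   other parameters; exporting HOSTNAME_CMP001 / ETH0_IP_ADDRESS_CMP001
   before rendering overrides these. #}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.164.3') %}
```

Pinning gateway: '172.17.42.1' and the dhcp range ['172.17.42.10', '172.17.42.60'] keeps the underlay consistent with the net04_ext allocation pool that this lab's openstack.yaml used to create by hand.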
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
index b7fcb07..b1e37c6 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
@@ -3,124 +3,13 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
index 089a255..d97665e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
@@ -9,13 +9,20 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Install cinder volume (workaround)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
index 954323c..6e0fee1 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -43,7 +43,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.126
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -163,7 +163,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
index 275f24c..77980d0 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
# Name of the context file used to render the Environment model (without the .yaml extension, which is fixed)
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail-nfv') %}
@@ -33,9 +33,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -44,8 +44,8 @@
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
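
The reclass-tools steps above all follow one pattern: activate the prepared virtualenv on cfg01, then edit the generated model in place, with del-key removing a generated definition and add-key --merge appending to a class list. The only changes in this hunk are the model path (infra/config.yml became infra/config/init.yml in the newer layout) and the VCP image mirror. The pattern, reduced to a single step:

```
- description: Edit the generated reclass model in place (pattern of the steps above)
  cmd: |
    set -e;
    . /root/venv-reclass-tools/bin/activate;
    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
```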
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
index 37082f6..5ba2e3f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
@@ -1,4 +1,5 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
@@ -65,36 +66,31 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install glusterfs client on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
# Install slv2 infra
#Launch containers
- description: Install Mongo if target matches
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure Alerta if it is exists
+- description: Configure Mongo cluster if target matches
cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
+ retry: {count: 5, delay: 20}
skip_fail: false
- description: Install telegraf
@@ -113,19 +109,31 @@
skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -141,23 +149,12 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
# Install service for the log collection
- description: Configure fluentd
@@ -186,31 +183,23 @@
retry: {count: 1, delay: 10}
skip_fail: false
-# Collect grains needed to configure the services
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect Grains
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -219,39 +208,38 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
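
A recurring pattern in the reworked sl.yaml: cluster-forming services (elasticsearch, kibana, influxdb) are now converged in two passes, first on the *01* node alone, then on the whole group, replacing the old single-shot `-b 1` batching — presumably so the first node can create the cluster before the rest join. Reduced to its skeleton:

```
# Pass 1: bootstrap on the first node only.
- description: Install elasticsearch server (first node)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

# Pass 2: converge the remaining members against the now-existing cluster.
- description: Install elasticsearch server (all nodes)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
```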
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
index 7677268..59a799e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
@@ -47,43 +47,14 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
- # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
- - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.8.0/24 -D
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
index 9971a9f..4dc3470 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
@@ -3,124 +3,13 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Setup glusterfs on primary controller
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 013fb29..fe01d30 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -9,13 +9,20 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Install cinder volume (workaround)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index e57820e..476df0d 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -43,7 +43,7 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.126
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -158,7 +158,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
index db949d4..02bcbf2 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
@@ -12,7 +12,7 @@
control_vlan: '2422'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.126
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -154,7 +154,7 @@
salt_master_address: 10.167.8.66
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 3a54496..24ee31f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -4,7 +4,7 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_TRUSTY_IMAGE_URL with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_XENIAL_IMAGE_URL with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
@@ -34,9 +34,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -45,8 +45,8 @@
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
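
Both steps drive reclass-tools against the generated cluster model: del-key drops a YAML subtree addressed by its dotted key path, while add-key with --merge appends an entry to an existing list instead of overwriting it. The same operations as standalone shell, assuming the virtualenv created earlier in this template:

    . /root/venv-reclass-tools/bin/activate
    # del-key: remove a subtree addressed by a dotted key path
    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml
    # add-key --merge: append to the 'classes' list rather than replace it
    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge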
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 925c795..0a3867a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -1,4 +1,5 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
@@ -65,36 +66,31 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install glusterfs client on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
# Install slv2 infra
#Launch containers
- description: Install Mongo if target matches
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+- description: Configure Mongo cluster if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
+ fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
+ retry: {count: 5, delay: 20}
skip_fail: false
- description: Install telegraf
@@ -112,29 +108,32 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
- fi
+- description: Install elasticsearch server on the first node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server on all kibana nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -150,23 +149,12 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
# Install service for the log collection
- description: Configure fluentd
@@ -195,31 +183,23 @@
retry: {count: 1, delay: 10}
skip_fail: false
-# Collect grains needed to configure the services
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect grains, sync modules and update mine
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -228,39 +208,38 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
+ retry: {count: 1, delay: 10}
skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
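
The consolidated grains step preserves the original ordering, which matters: grains are rendered first, modules are re-synced, and only then does mine.update publish the fresh grains to the Salt mine for other minions to consume. A short illustrative consumer of such mine data inside a state template (mine.get is a standard Salt function; the mine function shown is an assumption):

    {# Read addresses that other minions published via mine.update #}
    {% set ctl_addrs = salt['mine.get']('I@salt:minion', 'network.ip_addrs', tgt_type='compound') %}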
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index 6c9e48f..cde8295 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -47,38 +45,15 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
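
The trimmed user-data drops the mirror and package preparation entirely and assumes an image with salt-master and salt-minion preinstalled: the node only creates the reclass nodes directory, enables and starts both services, and waits for the local minion to answer. Condensed to a minimal cloud-config sketch (same commands as the added lines):

    #cloud-config
    runcmd:
      - mkdir -p /srv/salt/reclass/nodes
      - systemctl enable salt-master salt-minion
      - systemctl start salt-master salt-minion
      - salt-call -l info --timeout=120 test.ping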
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
index e6dc270..4d9af8c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
@@ -1,117 +1,19 @@
{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index f7dab55..d2d414b 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -10,7 +10,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -58,60 +58,3 @@
retry: {count: 10, delay: 30}
skip_fail: false
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.128/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.130,end=172.17.42.180 --gateway 172.17.42.129'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 10.167.12.0/24 --name net04__subnet --allocation-pool start=10.167.12.150,end=10.167.12.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: temp WR
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ifdown br-prv; ifup br-prv'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 653fc81..c137d12 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -79,7 +79,7 @@
control_vlan: '2416'
cookiecutter_template_branch: proposed
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.62
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.0/26
@@ -116,10 +116,13 @@
openstack_benchmark_node01_address: 10.167.11.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '3'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.11
- openstack_compute_rack01_tenant_subnet: 10.167.12
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 10.167.11.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.11.11
@@ -190,7 +193,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.2
shared_reclass_branch: proposed
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index d694e84..ce9318e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
index 3f4f128..48bf712 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -46,21 +44,16 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
index 23eb24c..15e22ba 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
@@ -73,9 +73,9 @@
net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
params:
ip_reserved:
- gateway: +1
- l2_network_device: -2
-
+ gateway: '172.17.42.129'
+ ip_ranges:
+ dhcp: ['172.17.42.130', '172.17.42.180']
groups:
- name: virtual
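
The external address pool no longer derives its addresses from relative offsets (gateway: +1, l2_network_device: -2) but pins them explicitly, matching the 172.17.42.129 gateway and 172.17.42.130-180 allocation pool that the removed net04_ext subnet steps used. As laid out elsewhere in these underlay templates, ip_ranges conventionally sits under params as a sibling of ip_reserved; a sketch of the resulting pool (pool name and indentation assumed):

    address_pools:
      external-pool01:
        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
        params:
          ip_reserved:
            gateway: '172.17.42.129'
          ip_ranges:
            dhcp: ['172.17.42.130', '172.17.42.180']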
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
deleted file mode 100644
index d559d73..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
+++ /dev/null
@@ -1,163 +0,0 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Install keepalived on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install etcd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' state.sls etcd.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install certs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' salt.minion -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install etcd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' state.sls etcd.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the etcd health
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Kubernetes Addons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Kubernetes components
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' state.sls kubernetes.pool
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 60}
- skip_fail: false
-
-# Opencontrail Control Plane
-
-- description: Create configuration files for OpenContrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure OpenContrail as an add-on for Kubernetes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Verify the status of the OpenContrail service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Set up the OpenContrail resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Refresh pillars on cmp*
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'cmp*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all on contrail computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' saltutil.sync_all
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Apply highstate on contrail computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Reboot contrail computes
- cmd: salt --timeout=600 -C 'I@opencontrail:compute' system.reboot
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Apply opencontrail.client on contrail computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run Kubernetes master without setup
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: true
-
-- description: Run Kubernetes master setup
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Restart Kubelet
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' service.restart 'kubelet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Check nodes registrations
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' cmd.run 'sleep 60; kubectl get nodes'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Renew hosts file on a whole cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
deleted file mode 100644
index c5648a8..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
+++ /dev/null
@@ -1,149 +0,0 @@
-nodes:
- cfg01.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: single_ctl
-
- kvm02.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: single_ctl
-
- kvm03.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: single_ctl
-
- ctl01.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: kubernetes_control_node01
- roles:
- - kubernetes_control_contrail
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.17.41.9
- enp2s0f1:
- role: single_ctl
- single_address: 10.167.8.239
-
- ctl02.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: kubernetes_control_node02
- roles:
- - kubernetes_control_contrail
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.17.41.10
- enp2s0f1:
- role: single_ctl
- single_address: 10.167.8.238
-
- ctl03.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: kubernetes_control_node03
- roles:
- - kubernetes_control_contrail
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.17.41.11
- enp2s0f1:
- role: single_ctl
- single_address: 10.167.8.237
-
- # prx01.bm-mcp-pike-k8s-contrail.local:
- # reclass_storage_name: kubernetes_proxy_node01
- # roles:
- # - kubernetes_proxy
- # # - infra_proxy
- # # - stacklight_proxy
- # - salt_master_host
- # - linux_system_codename_xenial
- # interfaces:
- # enp9s0f0:
- # role: single_mgm
- # deploy_address: 172.17.41.8
- # enp9s0f1:
- # role: single_ctl
- # single_address: 10.167.8.81
-
- cmp001.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: kubernetes_compute_node001
- roles:
- - linux_system_codename_xenial
- - kubernetes_compute_contrail
- - salt_master_host
- #- features_lvm_backend
- interfaces:
- enp9s0f0:
- role: single_dhcp
- ens11f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.101
- ens11f1:
- role: single_ctl
- single_address: 10.167.8.101
-
- cmp002.bm-mcp-pike-k8s-contrail.local:
- reclass_storage_name: kubernetes_compute_node002
- roles:
- - linux_system_codename_xenial
- - kubernetes_compute_contrail
- - salt_master_host
- #- features_lvm_backend
- interfaces:
- enp9s0f0:
- role: single_dhcp
- ens11f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.102
- ens11f1:
- role: single_ctl
- single_address: 10.167.8.102
-
- # cmp002.bm-mcp-pike-k8s-contrail.local:
- # reclass_storage_name: kubernetes_compute_node02
- # roles:
- # - features_lvm_backend
- # - linux_system_codename_xenial
- # - kubernetes_compute_contrail
- # interfaces:
- # enp2s0f1:
- # role: single_mgm
- # deploy_address: 172.16.49.74
- # enp5s0f0:
- # role: bond0_ab_contrail
- # tenant_address: 192.168.0.102
- # enp5s0f1:
- # role: single_vlan_ctl
- # single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
deleted file mode 100644
index 18032a1..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
+++ /dev/null
@@ -1,211 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-# commented as ctl is bm
-# ctl01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: kubernetes_control_node01
-# roles:
-# - kubernetes_control_contrail
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-#
-# ctl02.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: kubernetes_control_node02
-# roles:
-# - kubernetes_control_contrail
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-#
-# ctl03.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: kubernetes_control_node03
-# roles:
-# - kubernetes_control_contrail
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
-# commented as there is no k8s proxy nodes in this setup
-# prx01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: kubernetes_proxy_node01
-# roles:
-# - kubernetes_proxy
-# # - infra_proxy
-# # - stacklight_proxy
-# - salt_master_host
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
-# prx02.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: kubernetes_proxy_node02
-# roles:
-# - kubernetes_proxy
-# # - infra_proxy
-# # - stacklight_proxy
-# - salt_master_host
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# commented as shpuld be in pod
-# nal01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: opencontrail_analytics_node01
-# roles:
-# - opencontrail_analytics
-# - linux_system_codename_xenial
-# - salt_master_host
-# interfaces:
-# ens3:
-# role: single_ctl
-# single_address: 10.167.8.31
-#
-# nal02.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: opencontrail_analytics_node02
-# roles:
-# - opencontrail_analytics
-# - linux_system_codename_xenial
-# - salt_master_host
-# interfaces:
-# ens3:
-# role: single_ctl
-# single_address: 10.167.8.32
-#
-# nal03.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: opencontrail_analytics_node03
-# roles:
-# - opencontrail_analytics
-# - linux_system_codename_xenial
-# - salt_master_host
-# interfaces:
-# ens3:
-# role: single_ctl
-# single_address: 10.167.8.33
-#
-# ntw01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: opencontrail_control_node01
-# roles:
-# - opencontrail_control
-# - linux_system_codename_xenial
-# - salt_master_host
-# interfaces:
-# ens3:
-# role: single_ctl
-# single_address: 10.167.8.21
-#
-# ntw02.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: opencontrail_control_node02
-# roles:
-# - opencontrail_control
-# - linux_system_codename_xenial
-# - salt_master_host
-# interfaces:
-# ens3:
-# role: single_ctl
-# single_address: 10.167.8.22
-#
-# ntw03.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: opencontrail_control_node03
-# roles:
-# - opencontrail_control
-# - linux_system_codename_xenial
-# - salt_master_host
-# interfaces:
-# ens3:
-# role: single_ctl
-# single_address: 10.167.8.23
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
new file mode 100644
index 0000000..80073cf
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
@@ -0,0 +1,11 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..5aa9ebe
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
@@ -0,0 +1,94 @@
+nodes:
+ cfg01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+ single_address: 10.167.8.99
+
+ # Physical nodes
+ kvm01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_sdb
+ - linux_system_codename_xenial
+ interfaces:
+      enp2s0f1:
+ role: single_dhcp
+ enp5s0f0:
+ role: bond0_ab_contrail
+ enp5s0f1:
+ role: single_vlan_ctl
+
+# cmp001.cookied-bm-oc40-queens.local:
+# reclass_storage_name: openstack_compute_node01
+# roles:
+# - openstack_compute
+# - features_lvm_backend_volume_sdb
+# - linux_system_codename_xenial
+# interfaces:
+# enp2s0f1:
+# role: single_mgm
+# deploy_address: 172.16.49.73
+# enp5s0f0:
+# role: single_contrail_vlan_prv
+# tenant_address: 192.168.0.101
+# enp5s0f1:
+# role: single_vlan_ctl
+# single_address: 10.167.8.101
+# cmp002.cookied-bm-oc40-queens.local:
+# reclass_storage_name: openstack_compute_node02
+# roles:
+# - openstack_compute
+# - features_lvm_backend_volume_sdb
+# - linux_system_codename_xenial
+# interfaces:
+# enp2s0f1:
+# role: single_mgm
+# deploy_address: 172.16.49.74
+# enp5s0f0:
+# role: single_contrail_vlan_prv
+# tenant_address: 192.168.0.102
+# enp5s0f1:
+# role: single_vlan_ctl
+# single_address: 10.167.8.102
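
The cmp<<count>> entry is a counted-node template: it maps the whole compute rack onto the openstack_compute_rack01 storage class, and node generation expands it into cmp001, cmp002, ... using the *_address_ranges parameters defined in the cookiecutter context earlier in this change. The commented blocks above show the per-node form it replaces; one expanded node would look roughly like this (values taken from the commented cmp001 example):

    cmp001.cookied-bm-oc40-queens.local:
      reclass_storage_name: openstack_compute_node01
      roles:
        - openstack_compute
        - features_lvm_backend_volume_sdb
        - linux_system_codename_xenial
      interfaces:
        enp2s0f1:
          role: single_dhcp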
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
new file mode 100644
index 0000000..7dff4de
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
@@ -0,0 +1,287 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% set PATTERN = os_env('PATTERN', 'false') %}
+{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Install cinder volume (workaround)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
+
+# Install Contrail
+
+- description: Install Docker services
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
+ fi; sleep 10;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install opencontrail database services on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install opencontrail database services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors on first minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail collectors
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Spawn Opencontrail docker images
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Finalize opencontrail services on the first database node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Finalize opencontrail services on control-plane clients
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Finalize opencontrail services on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: true
+
+- description: Check contrail status
+ cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Reboot computes
+ cmd: |
+ salt "cmp*" system.reboot;
+ sleep 600;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Remove crash files from /var/crashes/ left by vrouter crashes
+ cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Apply Opencontrail client on computes
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Apply Opencontrail states on computes
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Check status for contrail services
+ cmd: |
+ sleep 15;
+ salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create heat-net before creating the external network
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create heat-net'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create public network for contrail
+ cmd: |
+ salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
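+
+# In the call above, 'asn' and 'target' together form the BGP route target
+# (target:64512:10000) that the Contrail gateway uses to import/export the
+# routes of this external network; 'ip_prefix'/'ip_prefix_len' define its
+# 192.168.200.0/24 subnet. These values should match the physical router side.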
+
+- description: Create heat-subnet for contrail via the neutron client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create heat-router via the neutron client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create heat-router'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set the public network as the heat-router gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add the heat-subnet interface to heat-router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
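+
+# A quick manual way to verify the topology created above (standard neutron
+# CLI calls, run on ctl01 after sourcing the credentials):
+#   . /root/keystonercv3
+#   neutron net-list
+#   neutron router-port-list heat-router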
+
+# Start preparing the tempest runtest
+
+- description: Upload tempest template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: runtest.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: false
+
+- description: Include the class with the tempest template into the cfg node definition
+ cmd: |
+ sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ salt '*' saltutil.refresh_pillar;
+ salt '*' saltutil.sync_all;
+ salt 'ctl01*' pkg.install docker.io;
+ salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+ salt 'cfg01*' state.sls salt.minion && sleep 20;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
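+
+# The sed call above inserts the runtest class as the first entry of the
+# generated cfg01 node definition, e.g. it turns
+#   classes:
+#   - cluster.{{ LAB_CONFIG_NAME }}
+# into
+#   classes:
+#   - cluster.{{ LAB_CONFIG_NAME }}.infra.runtest
+#   - cluster.{{ LAB_CONFIG_NAME }}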
+
+- description: Enforce keystone client
+ cmd: |
+ salt 'cfg01*' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create flavors for tests
+ cmd: |
+ salt 'cfg01*' state.sls nova.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image
+ cmd: |
+ salt 'cfg01*' state.sls glance.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate tempest config
+ cmd: |
+ salt 'cfg01*' state.sls runtest;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
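+
+# The keystone/nova/glance client states above consume pillar data pulled in
+# by the runtest class, and 'state.sls runtest' then renders tempest.conf
+# into /tmp/test (runtest_tempest_cfg_dir in runtest.yml) on the
+# tempest_test_target node, where the docker run below mounts it.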
+
+- description: Download cirros image for runtest
+ cmd: |
+ wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run tempest from new docker image
+ cmd: |
+ OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+ docker run -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
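+
+# ARGS is forwarded to the test runner inside the ci-tempest image; by common
+# tempest runner convention '-r test' is a selection regex matching the whole
+# suite and '-w 2' runs two workers in parallel (an assumption; the exact
+# flags are interpreted by the image's run-tempest entrypoint).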
+
+- description: Download xml results
+ download:
+ remote_path: /tmp/test/
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_CTL01 }}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+ _param:
+ glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+ openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+ openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+ openstack_public_neutron_subnet_gateway: 192.168.200.1
+ runtest_tempest_cfg_dir: /tmp/test
+ runtest_tempest_cfg_name: tempest.conf
+ runtest_tempest_public_net: public
+ tempest_test_target: ctl01*
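+    # The ${_param:...} references below are resolved by reclass from the
+    # values above, e.g. cfg_dir expands to /tmp/test and public_network_id
+    # to 'public'.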
+ neutron:
+ client:
+ enabled: true
+ runtest:
+ enabled: true
+ keystonerc_node: ctl01*
+ tempest:
+ DEFAULT:
+ log_file: tempest.log
+ cfg_dir: ${_param:runtest_tempest_cfg_dir}
+ cfg_name: ${_param:runtest_tempest_cfg_name}
+ compute:
+ min_compute_nodes: 2
+ convert_to_uuid:
+ network:
+ public_network_id: ${_param:runtest_tempest_public_net}
+ enabled: true
+ heat_plugin:
+ build_timeout: '600'
+ put_keystone_rc_enabled: false
+ put_local_image_file_enabled: false
+ share:
+ capability_snapshot_support: true
+ run_driver_assisted_migration_tests: false
+ run_manage_unmanage_snapshot_tests: false
+ run_manage_unmanage_tests: false
+ run_migration_with_preserve_snapshots_tests: false
+ run_quota_tests: true
+ run_replication_tests: false
+ run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
similarity index 90%
copy from tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
index a9e33af..bfcd153 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
@@ -30,15 +30,6 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
bmk_enabled: 'False'
ceph_enabled: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
-
cicd_control_node01_address: 10.167.8.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.8.92
@@ -77,8 +68,8 @@
l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
-----END RSA PRIVATE KEY-----
cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-bm-4.0-contrail.local
- cluster_name: cookied-bm-4.0-contrail
+ cluster_domain: cookied-bm-oc40-queens.local
+ cluster_name: cookied-bm-oc40-queens
opencontrail_version: 4.0
linux_repo_contrail_component: oc40
compute_bond_mode: active-backup
@@ -91,13 +82,13 @@
control_vlan: '2422'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.65
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -149,10 +140,8 @@
opencontrail_control_node03_address: 10.167.8.23
opencontrail_control_node03_hostname: ntw03
opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_address: 10.167.8.220
opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
openldap_enabled: 'False'
openssh_groups: ''
openstack_benchmark_node01_address: 10.167.8.95
@@ -162,6 +151,18 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 10.167.8
openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+ openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_tenant_address_ranges: 192.168.0.101-192.168.0.102
+ openstack_compute_backend_address_ranges: 192.168.0.101-192.168.0.102
+ openstack_compute_node01_hostname: cmp01
+ openstack_compute_node02_hostname: cmp02
+ openstack_compute_node01_address: 10.167.8.101
+ openstack_compute_node02_address: 10.167.8.102
+ openstack_compute_node01_single_address: 10.167.8.101
+ openstack_compute_node02_single_address: 10.167.8.102
+ openstack_compute_node01_deploy_address: 172.16.49.73
+ openstack_compute_node02_deploy_address: 172.16.49.74
openstack_control_address: 10.167.8.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.8.11
@@ -201,7 +202,7 @@
openstack_proxy_node02_address: 10.167.8.82
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: pike
+ openstack_version: queens
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -216,7 +217,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.8.60
stacklight_log_hostname: log
@@ -226,7 +227,7 @@
stacklight_log_node02_hostname: log02
stacklight_log_node03_address: 10.167.8.63
stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
+ stacklight_long_term_storage_type: prometheus
stacklight_monitor_address: 10.167.8.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.8.71
@@ -245,12 +246,12 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
+ tenant_network_gateway: 192.168.0.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 192.168.0.0/24
tenant_vlan: '2423'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
- openldap_domain: cookied-bm-4.0-contrail.local
+ openldap_domain: cookied-bm-oc40-queens.local
openldap_enabled: 'True'
openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
new file mode 100644
index 0000000..90d7a3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
@@ -0,0 +1,271 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-oc40-queens.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-oc40-queens.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
similarity index 70%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
index 951075e..3853acd 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
@@ -1,21 +1,17 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-mcp-pike-k8s-contrail') %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-oc40-queens') %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-upgrade-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
-
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -26,20 +22,11 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: "Change path to internal storage for salt.control images"
cmd: |
set -e;
@@ -50,6 +37,14 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
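+
+# The sed expressions above strip the literal class entries
+# '- system.cinder.volume.single' and
+# '- system.cinder.volume.notification.messagingv2' from the control node
+# class list (the backslashes escape '-', ' ' and '.' for sed), so
+# cinder-volume is not scheduled on the CTL nodes.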
+
- description: Temporary workaround for correct bridge name according to environment templates
cmd: |
sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -58,29 +53,13 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-- description: "Disable kubelet_fail_on_swap"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.kubelet_fail_on_swap false /srv/salt/reclass/classes/system/kubernetes/common.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Update minion information
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
node_name: {{ HOSTNAME_CFG01 }}
@@ -126,15 +105,13 @@
retry: {count: 20, delay: 30}
skip_fail: false
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
#########################################
# Configure all running salt minion nodes
#########################################
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -157,6 +134,8 @@
retry: {count: 1, delay: 5}
skip_fail: false
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
@@ -171,14 +150,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
- cmd: |
- set -e;
- set -x;
- KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
- apt-get install -y sshuttle;
- sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
new file mode 100644
index 0000000..2ff8f3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
@@ -0,0 +1,15 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
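+
+# The macros above expand (from shared-sl.yaml and shared-sl-tests.yaml) into
+# the ordered StackLight deployment steps: Docker Swarm first, then the
+# MongoDB backend, the Telegraf/Prometheus and Elasticsearch/Kibana stacks,
+# log collection and the Ceilometer collector, followed by the checkout and
+# configuration of the StackLight test suite.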
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
similarity index 95%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
index 646af7a..6c9e48f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
@@ -1,103 +1,102 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifdown ens3
- sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
# Configure Ubuntu mirrors
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - apt-get update
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
similarity index 78%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
index 646af7a..106c3d5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
@@ -1,103 +1,99 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
# Configure Ubuntu mirrors
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
similarity index 79%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
index 646af7a..915981e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
@@ -1,103 +1,95 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
-
- - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
# Configure Ubuntu mirrors
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
similarity index 61%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
index da6afea..e84e22d 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
@@ -1,31 +1,34 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-pike-k8s-contrail') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-oc40-queens') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '172.17.41.3') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.17.41.4') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.17.41.5') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.17.41.6') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.17.41.7') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.17.41.8') %}
-{% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '172.17.41.9') %}
-{% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '172.17.41.10') %}
-{% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '172.17.41.11') %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
+#{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.8.99') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+#{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
+#{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
+#{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-oc40-queens/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
---
aliases:
@@ -33,57 +36,56 @@
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe_cmp {{ CLOUDINIT_USER_DATA_HWE_CMP }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-pike-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-bm-oc4_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.17.41.0/26:26') }}
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
params:
ip_reserved:
- gateway: +1
+ gateway: +62
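+            # '+N' entries are offsets from the start of the pool network,
+            # so with the default 172.16.49.64/26 'gateway: +62' resolves to
+            # 172.16.49.126 and 'l2_network_device: +61' to 172.16.49.125.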
l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
- default_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
- default_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
-
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+ #default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ #default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+ #default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- virtual_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
- virtual_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
- virtual_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
+ #virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ #virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+    #virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
#ip_ranges:
# dhcp: [+2, -4]
private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.13.0/24:24') }}
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
params:
ip_reserved:
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
gateway: +1
l2_network_device: +1
tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.14.0/24:24') }}
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '192.168.5.0/24:24') }}
params:
ip_reserved:
gateway: +1
l2_network_device: +1
external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.41.128/26:26') }}
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '192.168.200.0/24:24') }}
params:
ip_reserved:
gateway: +1
@@ -112,6 +114,9 @@
dhcp: false
parent_iface:
phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+ private:
+ parent_iface:
+ phys_dev: !os_env CONTROL_IFACE
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
@@ -149,18 +154,17 @@
- label: ens3
l2_network_device: admin
interface_model: *interface_model
- mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
network_config:
ens3:
networks:
- admin
- #ens4:
- # networks:
- # - private
-
+ ens4:
+ networks:
+ - private
- name: default
driver:
@@ -222,52 +226,6 @@
# enp3s0f1:
# networks:
# - admin
- # - name: {{ HOSTNAME_PRX01 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_PRX01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data
-
- # interfaces:
- # - label: enp9s0f0
- # l2_network_device: admin
- # mac_address: !os_env ETH0_MAC_ADDRESS_PRX01
- # - label: enp9s0f1
- # mac_address: !os_env ETH1_MAC_ADDRESS_PRX01
-
- # network_config:
- # enp9s0f0:
- # networks:
- # - admin
- # bond0:
- # networks:
- # - control
- # aggregation: active-backup
- # parents:
- # - enp9s0f1
- name: {{ HOSTNAME_KVM01 }}
role: salt_minion
@@ -414,133 +372,57 @@
parents:
- enp9s0f1
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CTL01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CTL01
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CTL01
-
- network_config:
- enp2s0f0:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD_CTL
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CTL02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CTL02
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CTL02
-
- network_config:
- enp2s0f0:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD_CTL
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CTL03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- # - label: eno1
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CTL03
- # - label: eno2
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CTL03
-
- network_config:
- # eno1:
- enp2s0f0:
- networks:
- - admin
-
+ # - name: {{ HOSTNAME_KVM04 }}
+ # role: salt_minion
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_KVM04 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+ #
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+ #
+  #      # As with the agent URL, this is a URL to the image that should be
+  #      # used to deploy the node. It should also be accessible from the deploying
+  #      # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ #
+ # - name: iso # Volume with name 'iso' will be used
+  #              # to store the image with cloud-init metadata.
+ #
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data
+ #
+ # interfaces:
+ # # - label: eno1
+ # - label: enp2s0f0
+ # l2_network_device: admin
+ # mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
+ # # - label: eno2
+ # - label: enp2s0f1
+ # mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
+ #
+ # network_config:
+ # # eno1:
+ # enp2s0f0:
+ # networks:
+ # - admin
+ # bond0:
+ # networks:
+ # - control
+ # aggregation: active-backup
+ # parents:
+ # - enp2s0f1
+ #
- name: {{ HOSTNAME_CMP001 }}
role: salt_minion
params:
@@ -554,7 +436,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
# cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -569,34 +451,28 @@
# for store image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe_cmp
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- - label: enp9s0f0
- l2_network_device: admin
+ - label: enp2s0f0
mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp9s0f1
+ - label: enp2s0f1
+ l2_network_device: admin
mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- # - label: enp5s0f0
- # mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- # - label: enp5s0f1
- # mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
# - label: enp5s0f2
# mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
# features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
network_config:
- enp9s0f0:
+ enp2s0f1:
networks:
- admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
- name: {{ HOSTNAME_CMP002 }}
role: salt_minion
@@ -610,8 +486,8 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -626,21 +502,73 @@
# for store image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe_cmp
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- - label: enp9s0f0
- l2_network_device: admin
+ # - label: eno1
+ - label: enp2s0f0
mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- - label: enp9s0f1
+ # - label: eth0
+ - label: enp2s0f1
+ l2_network_device: admin
mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+ # - label: eth3
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ # - label: eth2
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: eth4
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+ # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
network_config:
- enp9s0f0:
+ enp2s0f1:
networks:
- admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
+
+ # - name: {{ HOSTNAME_CMP003 }}
+ # role: salt_minion
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CMP003 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+ #
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+ #
+  #      # As with the agent URL, this is a URL to the image that should be
+  #      # used to deploy the node. It should also be accessible from the deploying
+  #      # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ #
+ # - name: iso # Volume with name 'iso' will be used
+  #              # to store the image with cloud-init metadata.
+ #
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_hwe
+ #
+ # interfaces:
+ # # - label: eno1
+ # - label: enp2s0f1
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
+ # # - label: eth0
+ # - label: enp2s0f0
+ # l2_network_device: admin
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
+ #
+ # network_config:
+ # enp2s0f0:
+ # networks:
+ # - admin
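
Throughout these baremetal underlay templates, lab-specific values (MAC addresses, IPMI hosts and credentials, volume sizes) are pulled from the environment via the `!os_env` YAML tag, so one template serves different hardware sets, and spare nodes such as KVM04 or CMP003 can stay commented out until a lab provides them. A minimal Python sketch of the lookup semantics this tag is assumed to follow (environment value if set, otherwise the inline default, as in `!os_env NODE_VOLUME_SIZE, 200`):

```python
import os

def os_env(spec: str) -> str:
    """Resolve an '!os_env VAR[, default]' style spec against the environment.

    A sketch of the assumed fuel-devops behaviour: the variable wins if set,
    otherwise the inline default applies; no default means the var is required.
    """
    parts = [p.strip() for p in spec.split(",", 1)]
    name = parts[0]
    if name in os.environ:
        return os.environ[name]
    if len(parts) == 2:
        return parts[1]
    raise KeyError(f"required environment variable {name} is not set")

# Example: the system volume size falls back to 200 GB unless the lab overrides it.
print(os_env("NODE_VOLUME_SIZE, 200"))
```
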
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
index fa2ffb2..c8612bb 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
@@ -50,7 +50,7 @@
control_vlan: '2422'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.126
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -187,7 +187,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
shared_reclass_refspec: refs/changes/44/16144/1
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
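
The repeated gerrit.mcp.mirantis.net to gerrit.mcp.mirantis.com replacements in this and the following templates are one mechanical domain migration applied across the template tree. A hedged sketch of how such a sweep could be scripted (the path and approach are illustrative, not the actual tooling used for this change):

```python
from pathlib import Path

OLD = "gerrit.mcp.mirantis.net"
NEW = "gerrit.mcp.mirantis.com"

# Illustrative root; in practice each file went through review individually.
for path in Path("tcp_tests/templates").rglob("*.yaml"):
    text = path.read_text()
    if OLD in text:
        path.write_text(text.replace(OLD, NEW))
        print(f"updated {path}")
```
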
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
index da9c583..e7584f8 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
@@ -50,7 +50,7 @@
control_vlan: '2422'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.126
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
@@ -262,7 +262,7 @@
sfdc_password: admin
sfdc_sandbox_enabled: 'False'
sfdc_username: admin
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
index c2cb615..fa2d723 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -28,11 +28,11 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-- description: 'Workaround for typo in salt.minion.service (https://gerrit.mcp.mirantis.net/#/c/14806/)'
+- description: 'Workaround for typo in salt.minion.service (https://gerrit.mcp.mirantis.com/#/c/14806/)'
cmd: |
- git clone https://gerrit.mcp.mirantis.net/salt-formulas/salt /tmp/salt-formula-salt;
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/salt /tmp/salt-formula-salt;
pushd /tmp/salt-formula-salt;
- git fetch https://gerrit.mcp.mirantis.net/salt-formulas/salt refs/changes/06/14806/1 && git checkout FETCH_HEAD;
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/salt refs/changes/06/14806/1 && git checkout FETCH_HEAD;
popd;
cp /tmp/salt-formula-salt/salt/minion/service.sls /usr/share/salt-formulas/env/salt/minion/service.sls;
node_name: {{ HOSTNAME_CFG01 }}
@@ -48,9 +48,9 @@
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
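
The reclass-tools calls above drop the static `openstack_compute_rack01` storage node and merge in the `openstack_compute_multi` system class instead; the target file moves from infra/config.yml to infra/config/init.yml to match the newer cluster model layout. A rough Python equivalent of the two operations on a trimmed, in-memory stand-in for that file (reclass-tools itself handles more cases; this only mimics del-key and add-key --merge):

```python
import yaml

# A trimmed stand-in for infra/config/init.yml (illustrative content only).
model = yaml.safe_load("""
classes:
  - system.reclass.storage.system.kubernetes_control_cluster
parameters:
  reclass:
    storage:
      node:
        openstack_compute_rack01:
          name: cmp<<count>>
          domain: example.local
""")

def del_key(data: dict, dotted: str) -> None:
    """reclass-tools del-key: walk the dotted path, drop the final key."""
    *parents, leaf = dotted.split(".")
    node = data
    for key in parents:
        node = node[key]
    node.pop(leaf, None)

del_key(model, "parameters.reclass.storage.node.openstack_compute_rack01")

# reclass-tools add-key 'classes' ... --merge: append the class if missing.
cls = "system.reclass.storage.system.openstack_compute_multi"
if cls not in model["classes"]:
    model["classes"].append(cls)

print(yaml.safe_dump(model, default_flow_style=False))
```
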
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
index 8faced7..3839f93 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -45,6 +45,13 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index fff58d3..438696b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -83,7 +83,7 @@
control_vlan: '10'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -119,11 +119,26 @@
kubernetes_control_node03_address: 10.167.4.13
kubernetes_control_node03_deploy_address: 10.167.5.13
kubernetes_control_node03_hostname: ctl03
+ kubernetes_compute_count: 4
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
+ kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
+ kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
kubernetes_enabled: 'True'
kubernetes_externaldns_enabled: 'False'
kubernetes_keepalived_vip_interface: br_ctl
kubernetes_network_calico_enabled: 'True'
- kubernetes_virtlet_enabled: 'False'
+ kubernetes_virtlet_enabled: 'True'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: 10.167.4.220
+ kubernetes_proxy_node01_address: 10.167.4.221
+ kubernetes_proxy_node02_address: 10.167.4.222
+ kubernetes_metallb_enabled: 'True'
+ metallb_addresses: 172.17.16.150-172.17.16.190
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
local_repositories: 'False'
maas_deploy_address: 10.167.5.15
maas_deploy_range_end: 10.167.5.199
@@ -156,7 +171,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 10.167.4.60
stacklight_log_hostname: log
@@ -191,4 +206,4 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
- vnf_onboarding_enabled: 'False'
+ vnf_onboarding_enabled: 'False'
\ No newline at end of file
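
The new kubernetes_compute_* keys above replace per-node compute definitions with a generated rack: kubernetes_compute_count drives how many cmp nodes exist, and each *_address_ranges string is carved into one address per node. A small sketch of how a range such as 10.167.5.101-10.167.5.104 maps onto the four generated hosts (the expansion logic mirrors the assumed cookiecutter behaviour):

```python
import ipaddress

def expand_range(spec: str) -> list[str]:
    """Expand an inclusive 'A-B' address range into individual addresses."""
    start_s, end_s = spec.split("-")
    start = ipaddress.ip_address(start_s)
    end = ipaddress.ip_address(end_s)
    return [str(ipaddress.ip_address(int(start) + i))
            for i in range(int(end) - int(start) + 1)]

count = 4  # kubernetes_compute_count
hostnames = [f"cmp{i:03d}" for i in range(1, count + 1)]
deploy = expand_range("10.167.5.101-10.167.5.104")  # deploy_address_ranges
single = expand_range("10.167.4.101-10.167.4.104")  # single_address_ranges

for host, dep, ctl in zip(hostnames, deploy, single):
    print(host, dep, ctl)
# cmp001 10.167.5.101 10.167.4.101 ... cmp004 10.167.5.104 10.167.4.104
```
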
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index 0a07a81..4abe271 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -9,6 +9,8 @@
role: single_dhcp
ens4:
role: single_static_ctl
+ ens5:
+ role: single_storage_dhcp
kvm01:
reclass_storage_name: infra_kvm_node01
@@ -85,7 +87,9 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl02:
reclass_storage_name: kubernetes_control_node02
@@ -96,7 +100,9 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl03:
reclass_storage_name: kubernetes_control_node03
@@ -107,10 +113,39 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
- cmp001:
- reclass_storage_name: kubernetes_compute_node01
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
+
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
+
+    # Generator-based computes, kept for compatibility only
+ cmp<<count>>:
+ reclass_storage_name: kubernetes_compute_rack01
roles:
- kubernetes_compute
- linux_system_codename_xenial
@@ -119,21 +154,9 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
- single_address: ${_param:kubernetes_compute_node01_address}
-
- cmp002:
- reclass_storage_name: kubernetes_compute_node02
- roles:
- - kubernetes_compute
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:kubernetes_compute_node02_address}
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
mon01:
reclass_storage_name: stacklight_server_node01
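
In the environment context above, the explicit cmp001/cmp002 entries give way to a single generator-based `cmp<<count>>` entry: the model expands it into kubernetes_compute_rack01 members, each getting the calico control role on ens4 and the new storage interface on ens5. A sketch of that expansion, assuming the generator substitutes `<<count>>` with zero-padded indexes:

```python
import copy

TEMPLATE = {
    "reclass_storage_name": "kubernetes_compute_rack01",
    "roles": ["kubernetes_compute", "linux_system_codename_xenial"],
    "interfaces": {
        "ens3": {"role": "single_dhcp"},
        "ens4": {"role": "single_ctl_calico"},
        "ens5": {"role": "single_storage_dhcp"},
    },
}

def expand_generator(template: dict, count: int) -> dict:
    """Expand a 'cmp<<count>>' style entry into one node per index."""
    return {f"cmp{i:03d}": copy.deepcopy(template)
            for i in range(1, count + 1)}

nodes = expand_generator(TEMPLATE, count=4)  # kubernetes_compute_count: 4
print(sorted(nodes))  # ['cmp001', 'cmp002', 'cmp003', 'cmp004']
```
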
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
index 8a739fc..70bf54d 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
@@ -2,7 +2,7 @@
{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index c249522..132a382 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -13,6 +13,8 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
+{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
+{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01') %}
{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02') %}
{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03') %}
@@ -23,12 +25,12 @@
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02') %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03') %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
{% import 'cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
@@ -36,7 +38,6 @@
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
@@ -57,6 +58,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+90, -10]
@@ -80,6 +83,8 @@
default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +101
default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -92,7 +97,8 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_PRX01 }}: +222
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
default_{{ HOSTNAME_KVM }}: +240
default_{{ HOSTNAME_KVM01 }}: +241
default_{{ HOSTNAME_KVM02 }}: +242
@@ -129,6 +135,15 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+10, -10]
@@ -168,7 +183,7 @@
external:
address_pool: external-pool01
- dhcp: false
+ dhcp: true
forward:
mode: nat
@@ -192,8 +207,6 @@
memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
@@ -206,29 +219,26 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
+ interfaces: &all_interfaces
- label: ens3
l2_network_device: admin
interface_model: *interface_model
- label: ens4
l2_network_device: private
interface_model: *interface_model
- network_config:
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
ens3:
networks:
- admin
ens4:
networks:
- private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_KVM01 }}
role: salt_minion
@@ -402,7 +412,7 @@
role: k8s_controller
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -412,9 +422,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -424,14 +431,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -441,9 +448,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -453,14 +457,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL03 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -470,9 +474,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -482,14 +483,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -508,14 +509,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP02 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -534,8 +535,60 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_CMP03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_CMP04 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
@@ -770,3 +823,55 @@
interfaces: *interfaces
network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_PRX02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
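
All of the new and reworked nodes in this underlay share their NIC layout through the `&all_interfaces` and `&all_network_config` anchors defined once on the cfg01 node and referenced elsewhere as `*all_interfaces` / `*all_network_config`. A minimal, self-contained demonstration of the YAML anchor/alias mechanics these templates rely on:

```python
import yaml

doc = """
cfg01:
  interfaces: &all_interfaces
    - {label: ens3, l2_network_device: admin}
    - {label: ens4, l2_network_device: private}
    - {label: ens5, l2_network_device: external}
ctl01:
  interfaces: *all_interfaces
"""

data = yaml.safe_load(doc)
# The alias resolves to the same parsed structure as the anchor,
# so every node picks up the third (external) interface automatically.
assert data["ctl01"]["interfaces"] == data["cfg01"]["interfaces"]
print(data["ctl01"]["interfaces"][2]["l2_network_device"])  # external
```
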
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index 9b2c7d9..b8bda7e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -83,7 +83,7 @@
control_vlan: '10'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -119,11 +119,22 @@
kubernetes_control_node03_address: 10.167.4.13
kubernetes_control_node03_deploy_address: 10.167.5.13
kubernetes_control_node03_hostname: ctl03
+ kubernetes_compute_count: 4
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
+ kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
+ kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
kubernetes_enabled: 'True'
kubernetes_externaldns_enabled: 'False'
kubernetes_keepalived_vip_interface: br_ctl
kubernetes_network_calico_enabled: 'True'
kubernetes_virtlet_enabled: 'False'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: 10.167.4.220
+ kubernetes_proxy_node01_address: 10.167.4.221
+ kubernetes_proxy_node02_address: 10.167.4.222
local_repositories: 'False'
maas_deploy_address: 10.167.5.15
maas_deploy_range_end: 10.167.5.199
@@ -152,7 +163,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
@@ -161,4 +172,4 @@
tenant_network_subnet: 10.167.6.0/24
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
+ use_default_network_scheme: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
index 4c01f4f..d13627b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -85,7 +85,7 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
ctl02:
reclass_storage_name: kubernetes_control_node02
@@ -96,7 +96,7 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
ctl03:
reclass_storage_name: kubernetes_control_node03
@@ -107,10 +107,33 @@
ens3:
role: single_dhcp
ens4:
+ role: single_ctl_calico
+
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- cmp001:
- reclass_storage_name: kubernetes_compute_node01
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+    # Generator-based computes, kept for compatibility only
+ cmp<<count>>:
+ reclass_storage_name: kubernetes_compute_rack01
roles:
- kubernetes_compute
- linux_system_codename_xenial
@@ -119,18 +142,4 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
- single_address: ${_param:kubernetes_compute_node01_address}
-
- cmp002:
- reclass_storage_name: kubernetes_compute_node02
- roles:
- - kubernetes_compute
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:kubernetes_compute_node02_address}
+ role: single_ctl_calico
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
index a7994a8..94b248f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
@@ -2,7 +2,7 @@
{% from 'cookied-cicd-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-cicd-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index 6168b6e..81a8afa 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -13,6 +13,8 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004.' + DOMAIN_NAME) %}
{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
@@ -23,12 +25,12 @@
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
{% import 'cookied-cicd-k8s-calico/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-cicd-k8s-calico/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
{% import 'cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
@@ -36,7 +38,6 @@
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
@@ -57,6 +58,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+90, -10]
@@ -80,6 +83,8 @@
default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +101
default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -92,7 +97,8 @@
default_{{ HOSTNAME_MTR01 }}: +86
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_PRX01 }}: +222
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
default_{{ HOSTNAME_KVM }}: +240
default_{{ HOSTNAME_KVM01 }}: +241
default_{{ HOSTNAME_KVM02 }}: +242
@@ -129,6 +135,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+10, -10]
@@ -192,8 +200,6 @@
memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
@@ -206,15 +212,6 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
interfaces:
- label: ens3
l2_network_device: admin
@@ -402,7 +399,7 @@
role: k8s_controller
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -412,9 +409,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -431,7 +425,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -441,9 +435,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -460,7 +451,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -470,9 +461,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -489,7 +477,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -515,7 +503,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -536,3 +524,107 @@
interfaces: *interfaces
network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP04 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
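
The `default_{{ HOSTNAME_... }}: +N` entries in each address_pool reserve the Nth address of that pool's network for the host, which is why moving prx01 from +222 to +221 frees +222 for the new prx02 (matching kubernetes_proxy_node01_address: 10.167.4.221 and ...node02_address: 10.167.4.222). Ranges like `dhcp: [+10, -10]` bound the DHCP pool relative to the start and end of the subnet. A sketch of the assumed fuel-devops offset arithmetic for a /24 pool:

```python
import ipaddress

net = ipaddress.ip_network("10.167.4.0/24")  # e.g. the control pool

def resolve(offset: int) -> ipaddress.IPv4Address:
    """Positive offsets count from the network address, negative from the end."""
    base = (int(net.network_address) if offset >= 0
            else int(net.broadcast_address))
    return ipaddress.ip_address(base + offset)

print(resolve(+221))               # 10.167.4.221 -> prx01
print(resolve(+222))               # 10.167.4.222 -> prx02
print(resolve(+10), resolve(-10))  # DHCP bounds: 10.167.4.10 .. 10.167.4.245
```
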
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index a1b22be..7352614 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -83,7 +83,7 @@
control_vlan: '10'
cookiecutter_template_branch: ''
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -119,10 +119,25 @@
kubernetes_control_node03_address: 10.167.4.13
kubernetes_control_node03_deploy_address: 10.167.5.13
kubernetes_control_node03_hostname: ctl03
+ kubernetes_compute_count: 4
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
+ kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
+ kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
kubernetes_enabled: 'True'
kubernetes_externaldns_enabled: 'False'
kubernetes_keepalived_vip_interface: br_ctl
kubernetes_network_calico_enabled: 'True'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: 10.167.4.220
+ kubernetes_proxy_node01_address: 10.167.4.221
+ kubernetes_proxy_node02_address: 10.167.4.222
+ kubernetes_metallb_enabled: 'True'
+ metallb_addresses: 172.17.16.150-172.17.16.190
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
local_repositories: 'False'
maas_deploy_address: 10.167.5.15
maas_deploy_range_end: 10.167.5.199
@@ -151,7 +166,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
@@ -169,5 +184,3 @@
kubernetes_network_genie_enabled: 'True'
kubernetes_genie_default_plugin: 'calico'
kubernetes_virtlet_enabled: 'True'
- kubernetes_compute_node01_hostname: cmp001
- kubernetes_compute_node02_hostname: cmp002
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
index 4c01f4f..807d07f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
@@ -9,6 +9,8 @@
role: single_dhcp
ens4:
role: single_static_ctl
+ ens5:
+ role: single_storage_dhcp
kvm01:
reclass_storage_name: infra_kvm_node01
@@ -85,7 +87,9 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl02:
reclass_storage_name: kubernetes_control_node02
@@ -96,7 +100,9 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
ctl03:
reclass_storage_name: kubernetes_control_node03
@@ -107,10 +113,39 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
- cmp001:
- reclass_storage_name: kubernetes_compute_node01
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
+
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
+ roles:
+ - kubernetes_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: single_storage_dhcp
+
+    # Generator-based computes, kept for compatibility only
+ cmp<<count>>:
+ reclass_storage_name: kubernetes_compute_rack01
roles:
- kubernetes_compute
- linux_system_codename_xenial
@@ -119,18 +154,6 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
- single_address: ${_param:kubernetes_compute_node01_address}
-
- cmp002:
- reclass_storage_name: kubernetes_compute_node02
- roles:
- - kubernetes_compute
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:kubernetes_compute_node02_address}
+ role: single_ctl_calico
+ ens5:
+ role: single_storage_dhcp
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
index 1e3c924..625472c 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
@@ -2,7 +2,7 @@
{% from 'cookied-cicd-k8s-genie/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-cicd-k8s-genie/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
index 8c1b151..ee69506 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
@@ -13,12 +13,15 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
+{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
+{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
{% import 'cookied-cicd-k8s-genie/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-genie/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-cicd-k8s-genie/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
{% import 'cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
@@ -26,7 +29,6 @@
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
@@ -47,6 +49,8 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+90, -10]
@@ -70,6 +74,10 @@
default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +101
default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
default_{{ HOSTNAME_KVM }}: +240
default_{{ HOSTNAME_KVM01 }}: +241
default_{{ HOSTNAME_KVM02 }}: +242
@@ -106,6 +114,15 @@
default_{{ HOSTNAME_CID01 }}: +91
default_{{ HOSTNAME_CID02 }}: +92
default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_CMP03 }}: +103
+ default_{{ HOSTNAME_CMP04 }}: +104
+ default_{{ HOSTNAME_PRX01 }}: +221
+ default_{{ HOSTNAME_PRX02 }}: +222
ip_ranges:
dhcp: [+10, -10]
@@ -145,7 +162,7 @@
external:
address_pool: external-pool01
- dhcp: false
+ dhcp: true
forward:
mode: nat
@@ -169,8 +186,6 @@
memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
@@ -183,29 +198,26 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
+ interfaces: &all_interfaces
- label: ens3
l2_network_device: admin
interface_model: *interface_model
- label: ens4
l2_network_device: private
interface_model: *interface_model
- network_config:
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
ens3:
networks:
- admin
ens4:
networks:
- private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_KVM01 }}
role: salt_minion
@@ -379,7 +391,7 @@
role: k8s_controller
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -389,9 +401,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -401,14 +410,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -418,9 +427,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -430,14 +436,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CTL03 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -447,9 +453,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -459,14 +462,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -485,14 +488,14 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
- name: {{ HOSTNAME_CMP02 }}
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -511,5 +514,109 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *interfaces
- network_config: *network_config
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_CMP03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_CMP04 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_PRX02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
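
Node sizing in these templates stays overridable from the environment in two ways: `{{ os_env('CFG_NODE_MEMORY', 8192) }}` is substituted by Jinja2 before the YAML is parsed, while `!os_env SLAVE_NODE_MEMORY, 8192` is a custom YAML tag resolved at load time; in both forms the second value is the default used when the variable is unset, which is why this change only bumps the defaults. A sketch of the pattern, with the values used above:

    params:
      vcpu: !os_env SLAVE_NODE_CPU, 2          # $SLAVE_NODE_CPU if set, else 2
      memory: !os_env SLAVE_NODE_MEMORY, 8192  # $SLAVE_NODE_MEMORY if set, else 8192

Exporting, for example, SLAVE_NODE_MEMORY=16384 in the shell before a run would override the new defaults without editing the template.
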
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
index a5992bf..53c9687 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -51,7 +51,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -97,6 +97,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 10.167.4
openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
openstack_control_address: 10.167.4.100
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.101
@@ -138,10 +141,11 @@
openstack_nfv_sriov_enabled: 'False'
openstack_nova_compute_hugepages_count: '2048'
openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_cpu_pinning: '3'
+ openstack_nova_cpu_pinning: '4,5,8,9,10,11'
openstack_ovs_dvr_enabled: 'False'
openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.4.80
+ openstack_proxy_address: 172.17.16.80 # external network endpoint
+ openstack_proxy_vip_interface: ens5
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.4.121
openstack_proxy_node01_hostname: prx01
@@ -190,7 +194,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
index 53f5dd0..8e0ecb6 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
@@ -80,6 +80,7 @@
reclass_storage_name: openstack_control_node01
roles:
- openstack_control_leader
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -91,6 +92,7 @@
reclass_storage_name: openstack_control_node02
roles:
- openstack_control
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -102,6 +104,7 @@
reclass_storage_name: openstack_control_node03
roles:
- openstack_control
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -178,20 +181,24 @@
prx01:
reclass_storage_name: openstack_proxy_node01
roles:
- - openstack_proxy
+      #- openstack_proxy  # a different VIP interface is used
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
+ ens5:
+ role: single_external
+ external_address: 172.17.16.121
+ external_network_netmask: 255.255.255.0
# Generator-based computes. For compatibility only
cmp<<count>>:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
- - features_lvm_backend
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -218,7 +225,10 @@
ens4:
role: single_ctl
ens5:
- role: single_ovs_br_prv
- mtu: 1500
+ role: bond0_ab_ovs_vxlan_mesh_no_tag
+ ens6:
+ role: bond0_ab_ovs_vxlan_mesh_no_tag
ens7:
- role: bond1_ab_ovs_floating
+ role: single_ovs_br_floating
+ external_address: 10.90.0.110
+ external_network_netmask: 255.255.255.0
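
The compute NIC layout changes from a single MTU-pinned OVS private bridge to a bond: assigning `bond0_ab_ovs_vxlan_mesh_no_tag` to both ens5 and ens6 makes them members of one active-backup ("ab") bond carrying the untagged VXLAN tenant mesh, while ens7 switches from a bonded floating role to a single OVS floating bridge with a static external address. A sketch of the resulting mapping, annotated under that reading of the role names:

    ens5:
      role: bond0_ab_ovs_vxlan_mesh_no_tag   # first bond member
    ens6:
      role: bond0_ab_ovs_vxlan_mesh_no_tag   # second member of the same bond
    ens7:
      role: single_ovs_br_floating           # floating-network bridge
      external_address: 10.90.0.110
      external_network_netmask: 255.255.255.0
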
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
index a2d8eb5..ae2e235 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# See shared-salt.yaml for other salt model repository parameters
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -18,21 +18,13 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "Workaround for PROD-18834: Pre-install linux-headers package"
- cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+- description: "Workaround to avoid rebooting cmp nodes: apply a patch to bring OVS interfaces UP (PROD-24343)"
cmd: |
- set -ex;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
- salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
- salt 'cmp*' cmd.run "service openvswitch-switch stop";
- salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
- salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
- salt 'cmp*' cmd.run "service openvswitch-switch start";
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
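
The replacement step uses the standard Gerrit ref layout for unmerged changes: `refs/changes/32/29432/11` is patchset 11 of change 29432 (the leading "32" is the last two digits of the change number), and `git checkout FETCH_HEAD` switches the clone to that patchset before the patched formula is copied into the Salt file root. The same step recurs in the other cookied-cicd-* salt.yaml templates below. A generic sketch of such a step, with placeholder change numbers, URL, and node name:

    - description: "Apply an unmerged Gerrit change to a salt formula (sketch)"
      cmd: |
        set -ex
        git clone https://gerrit.example.org/salt-formulas/linux /root/salt-formula-linux
        cd /root/salt-formula-linux
        git fetch https://gerrit.example.org/salt-formulas/linux refs/changes/NN/CHANGE/PATCHSET && git checkout FETCH_HEAD
        cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
      node_name: cfg01.example.local
      retry: {count: 1, delay: 10}
      skip_fail: false
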
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
index d7b588e..c114631 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
@@ -2,14 +2,12 @@
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
{% import 'cookied-cicd-pike-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
---
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dpdk') %}
@@ -220,8 +218,6 @@
memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
@@ -234,15 +230,6 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- - name: iso # Volume with name 'iso' will be used
-             # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
interfaces:
- label: ens3
l2_network_device: admin
@@ -518,9 +505,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -530,9 +514,26 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604_swp
- interfaces: *interfaces
- network_config: *network_config
-
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
index 375d734..b0c69e8 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -51,7 +51,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -97,6 +97,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 10.167.4
openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
openstack_control_address: 10.167.4.100
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.101
@@ -139,7 +142,8 @@
openstack_nova_compute_nfv_req_enabled: 'False'
openstack_ovs_dvr_enabled: 'True'
openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.4.80
+ openstack_proxy_address: 172.17.16.80 # external network endpoint
+ openstack_proxy_vip_interface: ens5
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.4.121
openstack_proxy_node01_hostname: prx01
@@ -192,7 +196,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 10.167.4.60
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
index 5ed6d36..4025792 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
@@ -84,6 +84,7 @@
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -97,6 +98,7 @@
- openstack_control
- openstack_database
- openstack_message_queue
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -110,6 +112,7 @@
- openstack_control
- openstack_database
- openstack_message_queue
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -120,13 +123,17 @@
prx01:
reclass_storage_name: openstack_proxy_node01
roles:
- - openstack_proxy
+      #- openstack_proxy  # a different VIP interface is used
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
+ ens5:
+ role: single_external
+ external_address: 172.17.16.121
+ external_network_netmask: 255.255.255.0
mon01:
reclass_storage_name: stacklight_server_node01
@@ -232,6 +239,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -246,7 +254,6 @@
gtw01:
reclass_storage_name: openstack_gateway_node01
roles:
- - openstack_gateway
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
index e9d9408..12e013c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
@@ -2,13 +2,24 @@
{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# See shared-salt.yaml for other salt model repository parameters
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+- description: "Workaround to avoid rebooting cmp nodes: apply a patch to bring OVS interfaces UP (PROD-24343)"
+ cmd: |
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
index a964d2b..2cbbce6 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -2,14 +2,12 @@
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
{% import 'cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
---
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dvr-sl') %}
@@ -161,8 +159,7 @@
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+180, +220]
groups:
- name: default
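
In fuel-devops address pools, ip_ranges entries are offsets into the pool's subnet: `[+10, -10]` means "from the 10th address after the network start to the 10th before its end", while `[+180, +220]` pins DHCP to a narrow absolute window, keeping the lower addresses free for the static node IPs listed above. A sketch, assuming the usual pool syntax from these templates:

    address_pools:
      admin-pool01:
        net: 10.70.0.0/16:24        # carve a /24 out of this range
        params:
          ip_ranges:
            dhcp: [+180, +220]      # x.x.x.180 .. x.x.x.220 within the /24
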
@@ -226,8 +223,6 @@
memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
@@ -240,15 +235,6 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- - name: iso # Volume with name 'iso' will be used
-             # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
interfaces:
- label: ens3
l2_network_device: admin
@@ -278,9 +264,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -319,9 +302,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -348,9 +328,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -611,9 +588,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -623,9 +597,26 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604_swp
- interfaces: *interfaces
- network_config: *network_config
-
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
@@ -641,6 +632,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -692,6 +686,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index d1e447c..a5a862b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -51,7 +51,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -97,6 +97,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 10.167.4
openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
openstack_control_address: 10.167.4.100
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.101
@@ -139,7 +142,8 @@
openstack_nova_compute_nfv_req_enabled: 'False'
openstack_ovs_dvr_enabled: 'False'
openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.4.80
+ openstack_proxy_address: 172.17.16.80 # external network endpoint
+ openstack_proxy_vip_interface: ens5
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.4.121
openstack_proxy_node01_hostname: prx01
@@ -186,13 +190,42 @@
7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
-----END RSA PRIVATE KEY-----
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 10.167.4.15
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 10.167.4.60
@@ -228,3 +261,9 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ openstack_octavia_enabled: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
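
The new Octavia keys describe the amphora management network: the CIDR is the network itself, the allocation pool bounds the addresses handed to amphorae on it, and the health-manager bind IP is expected to fall inside that subnet, as it does here. The same keys, annotated under the usual MCP model semantics (an interpretation, not confirmed by this change):

    openstack_octavia_enabled: 'True'
    octavia_hm_bind_ip: 192.168.1.12                     # health-manager listener, inside the mgmt CIDR
    octavia_lb_mgmt_cidr: 192.168.1.0/24                 # amphora management network
    octavia_lb_mgmt_allocation_pool_start: 192.168.1.2   # first address given to amphorae
    octavia_lb_mgmt_allocation_pool_end: 192.168.1.200   # last address given to amphorae
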
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
index 1791477..28a1115 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
@@ -82,6 +82,7 @@
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -95,6 +96,7 @@
- openstack_control
- openstack_database
- openstack_message_queue
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -108,6 +110,7 @@
- openstack_control
- openstack_database
- openstack_message_queue
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -118,13 +121,17 @@
prx01:
reclass_storage_name: openstack_proxy_node01
roles:
- - openstack_proxy
+      #- openstack_proxy  # a different VIP interface is used
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
+ ens5:
+ role: single_external
+ external_address: 172.17.16.121
+ external_network_netmask: 255.255.255.0
mon01:
reclass_storage_name: stacklight_server_node01
@@ -230,6 +237,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -244,7 +252,6 @@
gtw01:
reclass_storage_name: openstack_gateway_node01
roles:
- - openstack_gateway
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
index 3c4d021..33440ad 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
@@ -2,13 +2,24 @@
{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# See shared-salt.yaml for other salt model repository parameters
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+- description: "Workaround to avoid rebooting cmp nodes: apply a patch to bring OVS interfaces UP (PROD-24343)"
+ cmd: |
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
index f52a1a0..a7b966c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
@@ -2,14 +2,12 @@
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
{% import 'cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
---
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-ovs-sl') %}
@@ -226,8 +224,6 @@
memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
@@ -240,15 +236,6 @@
bus: ide
# source_image: !os_env CFG01_CONFIG_PATH # no source image required.
# it will be uploaded after config drive generation
- - name: iso # Volume with name 'iso' will be used
-             # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
interfaces:
- label: ens3
l2_network_device: admin
@@ -278,9 +265,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -319,9 +303,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -348,9 +329,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -611,9 +589,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -623,8 +598,26 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604_swp
- interfaces: *interfaces
- network_config: *network_config
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - external
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
@@ -640,6 +633,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
@@ -691,6 +687,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
              # to store the image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
new file mode 100644
index 0000000..855363b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
@@ -0,0 +1,269 @@
+default_context:
+ auditd_enabled: 'False'
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: cookied-cicd-queens-dvr-sl.local
+ cluster_name: cookied-cicd-queens-dvr-sl
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 10.167.5.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 10.167.5.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 10.167.5.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_deploy_range_end: 10.167.5.199
+ maas_deploy_range_start: 10.167.5.180
+ maas_deploy_vlan: '0'
+ maas_fabric_name: deploy-fabric0
+ maas_hostname: cfg01
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
+ openstack_control_address: 10.167.4.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.100
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 10.167.4.101
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 10.167.4.102
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 10.167.4.103
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.4.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.6.6
+ openstack_gateway_node02_address: 10.167.4.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.7
+ openstack_gateway_node03_address: 10.167.4.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.6.8
+ openstack_message_queue_address: 10.167.4.100
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 10.167.4.101
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 10.167.4.102
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 10.167.4.103
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.4.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.4.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+ openstack_octavia_enabled: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
+
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
new file mode 100644
index 0000000..c3efdde
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
@@ -0,0 +1,262 @@
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ classes:
+ - environment.cookied-cicd-queens-dvr-sl.override_ntp_virtual
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_vdb
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
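
As the comment above notes, `cmp<<count>>` is a generator key: the model expands it into numbered compute nodes (conventionally cmp001, cmp002, ...) according to `openstack_compute_count` ('2' in the matching cookiecutter context), so all computes share one set of roles and interfaces. A sketch of what the expansion effectively yields; the hostnames are the conventional form, shown for illustration:

    cmp001:
      reclass_storage_name: openstack_compute_rack01
      roles: &cmp_roles
        - openstack_compute
        - features_lvm_backend_volume_vdb
        - linux_system_codename_xenial
    cmp002:
      reclass_storage_name: openstack_compute_rack01
      roles: *cmp_roles             # identical role set on every generated compute
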
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
new file mode 100644
index 0000000..62a8a23
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
@@ -0,0 +1,25 @@
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+- description: "Workaround to avoid rebooting cmp nodes: apply a patch to bring OVS interfaces UP (PROD-24343)"
+ cmd: |
+ set -ex
+ git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+ cd /root/salt-formula-linux
+ git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+ cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..4c43578
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,101 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+     # Block SSH access while the node is being prepared
+ - cloud-init-per once sudo touch /is_cloud_init_started
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - echo "******** MOUNT CONFIG DRIVE"
+ # Mount config drive
+ - mkdir /root/config-drive
+ - mount /dev/sr0 /root/config-drive
+
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ #- sudo ifdown ens3
+ #- sudo ip r d default || true # remove existing default route to get it from dhcp
+ #- sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Run user data script from config drive
+ - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+ - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+ - rm -f /etc/network/interfaces
+ #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+ #- cp /root/config-drive/user-data /root/user-data
+ #- sed -i '/^reboot$/d' /root/user-data
+ #- set -x; cd /root && /bin/bash -xe ./user-data
+ - |
+ set -x
+ cd /root/config-drive
+ if /bin/bash -xe ./user-data; then
+ touch /is_cloud_init_finished
+ else
+ set +x
+ echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
+ fi
+
+ # Enable root access (after reboot)
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ #- path: /etc/network/interfaces
+ - path: /root/interfaces
+ content: |
+ auto lo
+ iface lo inet loopback
+
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 60
+ ServerAliveCountMax 0
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
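The runcmd above implements a small readiness protocol: bootcmd drops /is_cloud_init_started early in boot, and the config-drive bootstrap ends by writing /is_cloud_init_finished on success or /is_cloud_init_failed on failure. A caller could poll for these markers over SSH along these lines (a sketch; the 30-minute budget is an assumption):

    # Poll the bootstrap markers written by the user-data above (illustrative)
    for i in $(seq 1 180); do
      if [ -f /is_cloud_init_failed ]; then cat /is_cloud_init_failed; exit 1; fi
      if [ -f /is_cloud_init_finished ]; then exit 0; fi
      sleep 10
    done
    echo "cloud-init bootstrap timed out" >&2
    exit 1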
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
similarity index 71%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
index a73ca23..319c007 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
@@ -25,27 +25,25 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - export TERM=linux
+ - export LANG=C
# Configure dhclient
- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
+ # Enable grub menu using updated config below
+ - update-grub
+
# Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
# Create swap
- - fallocate -l 4G /swapfile
+ - fallocate -l 16G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
@@ -59,12 +57,3 @@
auto ens3
iface ens3 inet dhcp
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
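Compared to the source template, the swap file grows from 4G to 16G (matching the cfg01 user-data above), the hard-coded 172.18.208.44 nameserver and the ifdown/default-route juggling are dropped, and update-grub moves up next to the other early configuration steps. The resulting swap can be verified on a booted node with (a sketch):

    swapon --show   # should list /swapfile with SIZE 16G
    free -h         # Swap total should report about 16G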
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
new file mode 100644
index 0000000..baa714d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
@@ -0,0 +1,867 @@
+# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-queens-dvr-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-queens-dvr-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_KVM }}: +240
+ default_{{ HOSTNAME_KVM01 }}: +241
+ default_{{ HOSTNAME_KVM02 }}: +242
+ default_{{ HOSTNAME_KVM03 }}: +243
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_KVM }}: +240
+ default_{{ HOSTNAME_KVM01 }}: +241
+ default_{{ HOSTNAME_KVM02 }}: +242
+ default_{{ HOSTNAME_KVM03 }}: +243
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+180, +220]
+
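The +N / -N entries in ip_reserved and ip_ranges are offsets from the start and end of the subnet that devops allocates from the pool. For example, with PRIVATE_ADDRESS_POOL01 at its default 10.60.0.0/16:24, a /24 is carved out of 10.60.0.0/16; if 10.60.0.0/24 is chosen, the reservations resolve roughly as follows (an illustration of the convention, not tool output):

    # gateway:                 +1         -> 10.60.0.1
    # default_cfg01.<domain>:  +15        -> 10.60.0.15
    # default_cid01.<domain>:  +91        -> 10.60.0.91
    # dhcp: [+90, -10]                    -> 10.60.0.90 .. 10.60.0.245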
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
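The group volumes are backing stores that node volumes reference by name via backing_store; all three resolve from environment variables. Providing the images before deployment would look something like this (a sketch; the local paths are assumptions, the URLs come from the comments above):

    export MCP_IMAGE_PATH1604=/images/ubuntu-16-04-x64-mcpproposed.qcow2   # cloudimage1604 / mcp_ubuntu_1604_image
    export IMAGE_PATH_CFG01_DAY01=/images/cfg01-day01.qcow2               # cfg01_day01_image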
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+ # it will be uploaded after config drive generation
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
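ctl01 above declares the &interfaces and &network_config anchors; the remaining two-NIC nodes reuse them as *interfaces / *network_config aliases, so the admin/private wiring is written once. The compute nodes further down introduce a separate &all_interfaces anchor because they also attach to the tenant and external networks. The mechanism in plain YAML (an illustration):

    wiring: &nics {ens3: admin, ens4: private}
    ctl02: *nics    # expands to {ens3: admin, ens4: private}
    ctl03: *nics    # same mapping, declared once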
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
similarity index 80%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
index 505282d..16b73bd 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
@@ -1,9 +1,44 @@
default_context:
bmk_enabled: 'False'
- ceph_enabled: 'False'
+ designate_backend: bind
+ ceph_cluster_network: 172.16.10.0/24
+ ceph_enabled: 'True'
+ ceph_hyper_converged: 'False'
+ ceph_mon_node01_address: 172.16.10.66
+ ceph_mon_node01_hostname: cmn01
+ ceph_mon_node02_address: 172.16.10.67
+ ceph_mon_node02_hostname: cmn02
+ ceph_mon_node03_address: 172.16.10.68
+ ceph_mon_node03_hostname: cmn03
+ ceph_osd_backend: bluestore
+ ceph_osd_block_db_size: '10'
+ ceph_osd_bond_mode: active-backup
+ ceph_osd_count: '2'
+ ceph_osd_data_disks: /dev/vdb
+ ceph_osd_journal_or_block_db_disks: /dev/vdc
+ ceph_osd_node_count: '2'
+ ceph_osd_journal_size: '10'
+ ceph_osd_primary_first_nic: eth1
+ ceph_osd_primary_second_nic: eth2
+ ceph_osd_rack01_backend_subnet: 172.16.10
+ ceph_osd_rack01_hostname: osd
+ ceph_osd_rack01_single_subnet: 172.16.10
+ ceph_osd_single_address_ranges: 172.16.10.94-172.16.10.95
+ ceph_osd_deploy_address_ranges: 172.16.11.94-172.16.11.95
+ ceph_osd_backend_address_ranges: 172.16.10.94-172.16.10.95
+ ceph_public_network: 172.16.10.0/24
+ ceph_rgw_address: 172.16.10.75
+ ceph_rgw_hostname: rgw
+ ceph_rgw_node01_address: 172.16.10.76
+ ceph_rgw_node01_hostname: rgw01
+ ceph_rgw_node02_address: 172.16.10.77
+ ceph_rgw_node02_hostname: rgw02
+ ceph_rgw_node03_address: 172.16.10.78
+ ceph_rgw_node03_hostname: rgw03
+ ceph_version: luminous
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ cluster_domain: cookied-mcp-mitaka-dvr-ceph.local
+ cluster_name: cookied-mcp-mitaka-dvr-ceph
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -13,16 +48,17 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
email_address: ddmitriev@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
+ gnocchi_aggregation_storage: ceph
infra_bond_mode: active-backup
infra_deploy_nic: eth0
infra_kvm01_control_address: 172.16.10.101
@@ -51,6 +87,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,13 +139,13 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
+ openstack_version: mitaka
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
@@ -145,7 +184,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
@@ -181,4 +220,4 @@
tenant_network_subnet: 10.1.0.0/24
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
+ use_default_network_scheme: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
index 3e05cf0..89bf918 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
@@ -21,6 +21,7 @@
- features_designate_bind9_dns
- features_designate_bind9
- features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -38,6 +39,7 @@
- features_designate_bind9_database
- features_designate_bind9_dns
- features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -54,6 +56,7 @@
- openstack_message_queue
- features_designate_bind9_database
- features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -117,6 +120,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -142,3 +146,80 @@
role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
+
+ osd<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+  rgw03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
\ No newline at end of file
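These role additions pair with the underlay changes later in this patch: the controllers gain features_lvm_backend_control while the compute rack gains features_lvm_backend_volume_vdb, and correspondingly the 50G cinder volume moves from the ctl nodes to the cmp nodes, where it presumably appears in the guest as /dev/vdb (an assumption based on disk declaration order):

    # cmp node volume (underlay.yaml below)   # environment role consuming it
    - name: cinder                            #   - features_lvm_backend_volume_vdb
      capacity: 50                            #   (guest device /dev/vdb)
      format: qcow2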
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
new file mode 100644
index 0000000..546cc34
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
similarity index 63%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
index a1aac52..318a992 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
@@ -1,12 +1,15 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -16,7 +19,15 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
+
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -148,97 +159,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
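The hand-written cinder/LVM and gateway steps removed above are replaced by shared macros. Judging from the deleted lines, SHARED.INSTALL_DOCKER_ON_GTW() presumably expands to roughly the following two steps (inferred from the removed code, not from the macro's actual definition in shared-salt.yaml):

    - description: Install docker.io on gtw
      cmd: salt-call cmd.run 'apt-get install docker.io -y'
      node_name: {{ HOSTNAME_GTW01 }}
      retry: {count: 1, delay: 30}
      skip_fail: false

    - description: Enable forward policy
      cmd: iptables --policy FORWARD ACCEPT
      node_name: {{ HOSTNAME_GTW01 }}
      retry: {count: 1, delay: 30}
      skip_fail: false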
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..ef50b6d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr-ceph/overrides.yml') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{%- if OVERRIDES != '' %}
+{%- for param in OVERRIDES.splitlines() %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
+- description: Override cluster parameters
+ cmd: |
+ salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endfor %}
+
+- description: Refresh pillar
+ cmd: salt '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endif %}
+
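The OVERRIDES block above turns newline-separated 'key: value' pairs from the environment into reclass.cluster_meta_set calls against OVERRIDES_FILENAME and then refreshes the pillars. Since OVERRIDES defaults to 'override_example: true', the block runs even when nothing is exported; note also that the loop strips all spaces from each line, so values must not contain spaces. Usage would look something like this (a sketch; the keys are illustrative):

    export OVERRIDES='openstack_version:mitaka
    override_example:false'
    # each line becomes:
    #   salt-call reclass.cluster_meta_set name='<key>' value='<value>' file_name='<OVERRIDES_FILENAME>'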
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
similarity index 95%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
index ff0e77a..cb93ac9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
@@ -66,6 +66,13 @@
skip_fail: false
# Install slv2 infra
+# Install MongoDB for alerta
+- description: Install MongoDB
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
# Enable grub menu using updated config below
- update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
similarity index 67%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
index de5427a..248d63e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
@@ -1,36 +1,41 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
----
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-dvr-ceph') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -48,9 +53,15 @@
default_{{ HOSTNAME_MON01 }}: +107
default_{{ HOSTNAME_MON02 }}: +108
default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+90, -10]
@@ -70,9 +81,15 @@
default_{{ HOSTNAME_MON01 }}: +107
default_{{ HOSTNAME_MON02 }}: +108
default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+90, -10]
@@ -92,9 +109,15 @@
default_{{ HOSTNAME_MON01 }}: +107
default_{{ HOSTNAME_MON02 }}: +108
default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+10, -10]
@@ -114,9 +137,15 @@
default_{{ HOSTNAME_MON01 }}: +107
default_{{ HOSTNAME_MON02 }}: +108
default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+10, -10]
@@ -232,9 +261,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -273,9 +299,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -302,9 +325,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -409,9 +429,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -439,6 +456,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -490,6 +510,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -528,10 +551,10 @@
interfaces: *all_interfaces
network_config: *all_network_config
- - name: {{ HOSTNAME_DNS01 }}
+ - name: {{ HOSTNAME_CMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -540,7 +563,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -551,13 +574,13 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *all_interfaces
- network_config: *all_network_config
+ interfaces: *interfaces
+ network_config: *network_config
- - name: {{ HOSTNAME_DNS02 }}
+ - name: {{ HOSTNAME_CMN02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -566,7 +589,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -577,5 +600,172 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
- interfaces: *all_interfaces
- network_config: *all_network_config
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMN03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_OSD01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: ceph_osd
+ capacity: 50
+ format: qcow2
+ - name: ceph_journal
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_OSD02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: ceph_osd
+ capacity: 50
+ format: qcow2
+ - name: ceph_journal
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_RGW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_RGW02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_RGW03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
\ No newline at end of file
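
Note on the `!os_env` tag used throughout these node definitions: it is a custom YAML tag resolved by the template loader, taking an environment variable name and a fallback value after the comma. A minimal sketch with the variables used above:

```
# !os_env VAR, default  ->  value of $VAR if it is set, otherwise the default.
vcpu: !os_env SLAVE_NODE_CPU, 2            # SLAVE_NODE_CPU=4 gives the VM 4 vCPUs
memory: !os_env SLAVE_NODE_MEMORY, 2048    # unset, so the node gets 2048 MiB
capacity: !os_env NODE_VOLUME_SIZE, 150    # system disk size in GB
```

This is what lets the new cmn/osd/rgw nodes be resized per run without editing the template.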
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
index 0ade6cf..58281a4 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -145,7 +148,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
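
Besides moving the Gerrit URLs from gerrit.mcp.mirantis.net to gerrit.mcp.mirantis.com, the cookiecutter context now pins the generated compute nodes to fixed address ranges on each network. A sketch of how the new parameters line up with the rack subnets declared a few lines earlier (the cmp001/cmp002 mapping is an assumption based on the two-compute layout):

```
openstack_compute_rack01_single_subnet: 172.16.10                      # control plane
openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106   # cmp001, cmp002
openstack_compute_rack01_tenant_subnet: 10.1.0                         # tenant traffic
openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
```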
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
index ca8114b..65f4131 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
- features_designate_pool_manager_database
- features_designate_pool_manager
- features_designate_pool_manager_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -36,6 +37,7 @@
- openstack_message_queue
- features_designate_pool_manager_database
- features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -52,6 +54,7 @@
- openstack_message_queue
- features_designate_pool_manager_database
- features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -115,6 +118,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
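
The environment context gains LVM-backend roles: `features_lvm_backend_control` on the three controllers and `features_lvm_backend_volume_vdb` on the compute rack. These let the reclass model configure the cinder LVM backend that the shell steps removed from openstack.yaml (below) used to set up by hand. An illustrative compute entry with the new role; the `cmp<<count>>` key is an assumption based on the rack convention, and interface details are elided:

```
cmp<<count>>:
  reclass_storage_name: openstack_compute_rack01
  roles:
    - openstack_compute
    - features_lvm_backend_volume_vdb   # turn /dev/vdb into the cinder-volumes PV
    - linux_system_codename_xenial
```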
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
index e1d2cb4..8954160 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
@@ -2,98 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
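
core.yaml shrinks from roughly a hundred inline lines to seven macro calls, moving the step bodies into shared-core.yaml so every lab consumes a single definition. Note that MACRO_INSTALL_NGINX() has no counterpart among the removed steps, so the refactor also adds an nginx install. A sketch of one macro, reconstructed from the steps deleted above (the real definition lives in tcp_tests/templates/shared-core.yaml and may differ):

```
{#- Sketch only: reconstructed from the removed keepalived steps. -#}
{%- macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{%- endmacro %}
```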
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
index 699ffcb..b335251 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
@@ -9,6 +9,7 @@
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -40,7 +41,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -172,91 +173,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
@@ -269,9 +186,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-
-- description: Set floating ip address on br-floating
- cmd: ifconfig br-floating {{ IPV4_NET_EXTERNAL_PREFIX }}.110/24
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
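
The manual cinder LVM bootstrap (fdisk and pvcreate on each controller, vgcreate, the crudini enabled_backends workaround, and the cinder-volume restart) is gone; the backend is now modeled through the `features_lvm_backend_*` roles added in the environment context. The gateway's br-floating address assignment is dropped as well. Only the docker install survives, as a shared macro; a sketch reconstructed from the step it replaces (the actual definition sits in shared-salt.yaml):

```
{#- Assumed shape of SHARED.INSTALL_DOCKER_ON_GTW(). -#}
{%- macro INSTALL_DOCKER_ON_GTW() %}
- description: Install docker.io on gtw
  cmd: salt-call cmd.run 'apt-get install docker.io -y'
  node_name: {{ HOSTNAME_GTW01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
{%- endmacro %}
```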
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
index 04ec5b2..240f6e3 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr/overrides.yml') %}
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -24,7 +24,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
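
Two behavioral fixes here beyond the domain rename. FORMULA_SERVICES becomes the wildcard '\*', installing every available formula instead of a hand-maintained list. And the OVERRIDES parser gains a maxsplit: without it, any override value containing a colon (a URL, for instance) makes the two-name unpack fail. A sketch under the same Jinja semantics, with a hypothetical override value:

```
{#- With a colon inside the value, a bare split(':') yields three items
    and the two-name unpack raises; split(':', 1) keeps the value whole. -#}
{%- set param = "cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/x" %}
{%- set key, value = param.replace(' ','').split(':', 1) %}
{#- key   -> 'cookiecutter_template_url'
    value -> 'https://gerrit.mcp.mirantis.com/mk/x' -#}
```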
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
# Enable grub menu using updated config below
- update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
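
The cfg01 cloud-init now bootstraps Salt explicitly, so the config node comes up self-contained instead of relying on later provisioning. The added commands, annotated (the intent notes are inferred; the commands themselves are verbatim):

```
- mkdir -p /srv/salt/reclass/nodes           # node definitions directory expected by reclass
- systemctl enable salt-master               # persist both services across reboots
- systemctl enable salt-minion
- systemctl start salt-master
- systemctl start salt-minion
- salt-call -l info --timeout=120 test.ping  # block until the local minion answers
```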
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
index 7225c6d..81afdb5 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -232,9 +232,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -273,9 +270,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -302,9 +296,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -409,9 +400,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -439,6 +427,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -490,6 +481,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
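
Net effect of the underlay hunks above: compute hostnames move from cmp01/cmp02 to cmp001/cmp002 to match the generated model, and the 50 GB `cinder` disk migrates from the mcp_ubuntu_1604_image-backed nodes to the cloudimage1604-backed ones (presumably the two computes), where the `features_lvm_backend_volume_vdb` role expects to find it. The resulting compute volume list, assembled from the hunks; the /dev/vdb naming is an assumption from the role name and the removed fdisk steps:

```
volumes:
  - name: system
    capacity: !os_env NODE_VOLUME_SIZE, 150
    backing_store: cloudimage1604
    format: qcow2
  - name: cinder        # appears in the guest as /dev/vdb, consumed by the LVM backend
    capacity: 50
    format: qcow2
  - name: iso           # cloud-init metadata image
    capacity: 1
    format: raw
    device: cdrom
    bus: ide
```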
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
index 10f3c17..ecc8054 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -145,7 +148,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.60
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
index ca8114b..65f4131 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
@@ -20,6 +20,7 @@
- features_designate_pool_manager_database
- features_designate_pool_manager
- features_designate_pool_manager_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -36,6 +37,7 @@
- openstack_message_queue
- features_designate_pool_manager_database
- features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -52,6 +54,7 @@
- openstack_message_queue
- features_designate_pool_manager_database
- features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -115,6 +118,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
index 0c03d81..6a1278e 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
index 12261cd..6672997 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
@@ -7,6 +7,7 @@
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -16,7 +17,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -194,97 +195,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
index 46fd677..9f3767b 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-ovs/overrides.yml') %}
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -24,7 +24,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
index c02624c..4fee5c5 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -230,9 +230,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -271,9 +268,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -300,9 +294,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -407,9 +398,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -436,6 +424,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -487,6 +478,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
index b27d4f4..9cb3979 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -145,7 +148,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
index 803068e..6afe16e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
- features_designate_pool_manager_database
- features_designate_pool_manager
- features_designate_pool_manager_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -36,6 +37,7 @@
- openstack_message_queue
- features_designate_pool_manager_database
- features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -52,6 +54,7 @@
- openstack_message_queue
- features_designate_pool_manager_database
- features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -115,6 +118,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
index ac74382..edb5059 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
index 88d8367..7260beb 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
@@ -9,6 +9,7 @@
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -40,7 +41,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -172,97 +173,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
@@ -275,3 +186,9 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
+
+- description: WORKAROUND PROD-23354
+ cmd: sed -n 's/max_microversion = 2.42/max_microversion = 2.38/;w /var/log/lvm_mcp_newton.conf' /var/log/lvm_mcp.conf
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
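
newton-dvr also gains a tempest workaround (PROD-23354): Newton's compute API tops out at microversion 2.38, so the step rewrites the test config into a Newton-specific copy with the cap lowered. The same step, annotated (command unchanged from above):

```
- description: WORKAROUND PROD-23354
  # sed -n suppresses default printing; s/// lowers max_microversion from
  # 2.42 to 2.38, and the standalone 'w <file>' command then writes every
  # processed line to the Newton copy, leaving the original file untouched.
  cmd: sed -n 's/max_microversion = 2.42/max_microversion = 2.38/;w /var/log/lvm_mcp_newton.conf' /var/log/lvm_mcp.conf
  node_name: {{ HOSTNAME_GTW01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
```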
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
index d65d59e..52ec2f4 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-newton-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-newton-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-cookied-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-cookied-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-dvr/overrides.yml') %}
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -24,7 +24,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
# Enable grub menu using updated config below
- update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
index df0515f..7d6147d 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -232,9 +232,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -273,9 +270,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -302,9 +296,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -409,9 +400,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -439,6 +427,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -490,6 +481,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
index 09ed630..8049430 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -145,7 +148,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
index 3e05cf0..7baf03e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
@@ -21,6 +21,7 @@
- features_designate_bind9_dns
- features_designate_bind9
- features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -38,6 +39,7 @@
- features_designate_bind9_database
- features_designate_bind9_dns
- features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -54,6 +56,7 @@
- openstack_message_queue
- features_designate_bind9_database
- features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -117,6 +120,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
index a7cd35f..4b79fcb 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
@@ -2,99 +2,11 @@
{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
\ No newline at end of file
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
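
The inline keepalived/RabbitMQ/Galera/HAProxy/memcached/VIP steps removed above are replaced by shared macros, so every lab now consumes a single copy of the core install logic from shared-core.yaml. Reconstructed from the steps deleted in this hunk, one of those macros presumably looks like this (a sketch; the actual body lives in shared-core.yaml):

```
{# Sketch of a shared-core.yaml macro, reconstructed from the removed steps #}
{% macro MACRO_INSTALL_RABBITMQ() %}
- description: Install RabbitMQ on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Check the rabbitmq status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{% endmacro %}
```
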
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
index f11742b..04871da 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
@@ -7,6 +7,7 @@
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -16,7 +17,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -194,97 +195,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
@@ -297,3 +208,9 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
+
+- description: WORKAROUND PROD-23354
+ cmd: sed -n 's/max_microversion = 2.42/max_microversion = 2.38/;w /var/log/lvm_mcp_newton.conf' /var/log/lvm_mcp.conf
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
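
Two notes on this file. `SHARED.INSTALL_DOCKER_ON_GTW()` folds the removed "Install docker.io on gtw" and "Enable forward policy" steps into a shared macro. The new PROD-23354 step relies on sed's `w` command: with `-n` suppressing normal output, `s/.../.../;w <file>` applies the substitution and then writes every pattern space to the target, i.e. it copies the whole Tempest config while pinning `max_microversion` from 2.42 down to 2.38 (the Newton compute API ceiling). An equivalent, more explicit form would be (a sketch, not part of the change):

```
- description: PROD-23354 pin, written as two explicit steps (sketch)
  cmd: |
    cp /var/log/lvm_mcp.conf /var/log/lvm_mcp_newton.conf;
    sed -i 's/max_microversion = 2.42/max_microversion = 2.38/' /var/log/lvm_mcp_newton.conf
  node_name: {{ HOSTNAME_GTW01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
```
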
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
index c95be5c..89b705e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-newton-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-newton-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-ovs/overrides.yml') %}
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -24,7 +24,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
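
Two behavioral changes in this salt.yaml are worth spelling out. `FORMULA_SERVICES='\*'` swaps the long hand-maintained formula list for a wildcard, so all available salt formula packages are installed rather than an explicit subset. And `split(':', 1)` fixes override parsing for values that themselves contain colons (URLs, host:port pairs): with a maxsplit of 1, Jinja only cuts at the first colon. Illustrative values only:

```
{# Before: 'repo:https://host/path'.split(':') yields three items and the
   two-name unpack fails; maxsplit=1 keeps the URL intact #}
{%- set key, value = 'repo:https://host/path'.split(':', 1) %}
{# key == 'repo', value == 'https://host/path' #}
```
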
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
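
The cfg01 cloud-init payload now pre-creates /srv/salt/reclass/nodes and enables and starts salt-master and salt-minion at first boot; the trailing `salt-call ... test.ping` doubles as a readiness gate, blocking for up to 120 seconds until the local minion can reach the master. A manual re-check after boot could look like this (hypothetical step, not part of the change):

```
- description: Verify the salt services on cfg01 (sketch)
  cmd: systemctl is-active salt-master salt-minion && salt-call -l info --timeout=120 test.ping
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: true
```
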
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
index e84c25a..883c30f 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -222,9 +222,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -263,9 +260,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -292,9 +286,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -399,9 +390,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -428,6 +416,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -479,6 +470,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
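
The underlay change mirrors the role change: the 50 GB `cinder` qcow2 volume is dropped from the nodes backed by mcp_ubuntu_1604_image and attached to the two cloudimage1604 nodes (the computes), where it appears as /dev/vdb, matching the `features_lvm_backend_volume_vdb` role added to openstack_compute_rack01. The compute hostnames also move from cmp01/cmp02 to cmp001/cmp002. The role presumably automates what the manual steps removed from openstack.yaml did; reconstructed from those steps as a sketch (node name assumed):

```
{# Manual equivalent of features_lvm_backend_volume_vdb, per the removed steps #}
- description: Prepare the LVM cinder backend on a compute (sketch)
  cmd: |
    echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb;
    pvcreate /dev/vdb1;
    vgcreate cinder-volumes /dev/vdb1
  node_name: {{ HOSTNAME_CMP01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
```
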
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index aa9b016..f5b4f73 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -2,7 +2,7 @@
{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
index 095c58a..829d515 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
@@ -2,7 +2,7 @@
{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
similarity index 95%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 505282d..a74e3d7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -2,8 +2,9 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ designate_backend: bind
+ cluster_domain: cookied-mcp-ocata-dvr.local
+ cluster_name: cookied-mcp-ocata-dvr
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -13,7 +14,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +52,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -145,7 +149,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
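
Besides the rename to cookied-mcp-ocata-dvr, this context gains `designate_backend: bind` (consumed by the bind9 steps further down) and explicit per-network address ranges for the compute rack. With two computes, each range covers one address per node (assumed mapping, consistent with the cmp001/cmp002 hostnames set in the underlay):

```
# Assumed mapping of the .105-.106 ranges onto the two computes:
#   cmp001 -> 172.16.10.105 (single) / 192.168.10.105 (deploy) / 10.1.0.105 (tenant)
#   cmp002 -> 172.16.10.106 (single) / 192.168.10.106 (deploy) / 10.1.0.106 (tenant)
```
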
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
index 803068e..f7518bc 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
@@ -17,9 +17,10 @@
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
+ - features_designate_bind9_database
+ - features_designate_bind9
+ - features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -34,8 +35,9 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+ - features_designate_bind9_database
+ - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -50,8 +52,9 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+ - features_designate_bind9_database
+ - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -63,7 +66,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_pool_manager_proxy
+ - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -115,6 +118,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -144,7 +148,7 @@
dns01.mcp11-ovs-dpdk.local:
reclass_storage_name: openstack_dns_node01
roles:
- - features_designate_pool_manager_dns
+ - features_designate_bind9_dns
- linux_system_codename_xenial
classes:
- system.linux.system.repo.mcp.extra
@@ -161,7 +165,7 @@
dns02.mcp11-ovs-dpdk.local:
reclass_storage_name: openstack_dns_node02
roles:
- - features_designate_pool_manager_dns
+ - features_designate_bind9_dns
- linux_system_codename_xenial
classes:
- system.linux.system.repo.mcp.extra
@@ -173,4 +177,4 @@
role: single_dhcp
ens4:
role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
+ single_address: ${_param:openstack_dns_node02_address}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
new file mode 100644
index 0000000..fc5d4f8
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
similarity index 68%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
index 3de272e..dc9de1c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
@@ -1,14 +1,15 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -40,7 +41,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -53,9 +54,9 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
# install designate backend
-- description: Install powerdns
+- description: Install bind
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
+ -C 'I@bind:server' state.sls bind
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -172,97 +173,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
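
The DVR lab's designate backend flips from PowerDNS to BIND9: the `features_designate_pool_manager*` roles become `features_designate_bind9*` in the environment context, and the backend install step now targets `I@bind:server` with `state.sls bind`. A smoke check for the new backend could look like this (hypothetical step, not part of the change):

```
- description: Smoke check for the bind9 designate backend (sketch)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@bind:server' cmd.run 'named-checkconf && rndc status'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```
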
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
similarity index 67%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
index 6bac1ea..9d3deb7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
@@ -1,14 +1,14 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-ovs/overrides.yml') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml') %}
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -24,7 +24,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
index ff0e77a..405e647 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
similarity index 89%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
# Enable grub menu using updated config below
- update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
similarity index 96%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
index de5427a..4893e2c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,14 +12,14 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -30,7 +30,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -232,9 +232,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -273,9 +270,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -302,9 +296,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -409,9 +400,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -439,6 +427,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -490,6 +481,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
similarity index 95%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index eedb7d9..2a6d8f9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -2,8 +2,8 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-ovs.local
- cluster_name: virtual-mcp-ocata-ovs
+ cluster_domain: cookied-mcp-ocata-ovs.local
+ cluster_name: cookied-mcp-ocata-ovs
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -145,7 +148,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
similarity index 95%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
index 3e05cf0..7baf03e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
@@ -21,6 +21,7 @@
- features_designate_bind9_dns
- features_designate_bind9
- features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -38,6 +39,7 @@
- features_designate_bind9_database
- features_designate_bind9_dns
- features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -54,6 +56,7 @@
- openstack_message_queue
- features_designate_bind9_database
- features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -117,6 +120,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
new file mode 100644
index 0000000..6fc2af4
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
similarity index 64%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
index a1aac52..4072632 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
@@ -1,12 +1,13 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
@@ -16,7 +17,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -148,97 +149,7 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
similarity index 67%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
copy to tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
index 6bac1ea..41827c7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
@@ -1,14 +1,14 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-ovs/overrides.yml') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-ovs/overrides.yml') %}
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -24,7 +24,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
index 9ec64be..7cc598b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
similarity index 88%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
similarity index 96%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
index 382dba4..2d31a5a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,14 +12,14 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-ovs') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -28,7 +28,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -222,9 +222,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -263,9 +260,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -292,9 +286,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -399,9 +390,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -428,6 +416,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -479,6 +470,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index e17ac5b..725ff1c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -92,7 +95,7 @@
openstack_nfv_sriov_enabled: 'False'
openstack_nova_compute_hugepages_count: '2048'
openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_cpu_pinning: '3'
+ openstack_nova_cpu_pinning: '4,5,8,9,10,11'
openstack_ovs_dvr_enabled: 'False'
openstack_ovs_encapsulation_type: vxlan
openstack_proxy_address: 172.16.10.80
@@ -144,7 +147,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
shared_reclass_branch: master
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
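
`openstack_nova_cpu_pinning` takes a CPU list, not a count: the old value '3' dedicated only core 3, while '4,5,8,9,10,11' dedicates six explicit cores to pinned guests, presumably rendered into nova's `vcpu_pin_set` on this Pike-era lab and kept clear of the cores used by the OVS-DPDK PMD threads. A way to confirm the rendered value (hypothetical check, not part of the change):

```
- description: Inspect the resulting pinning on the computes (sketch)
  cmd: salt 'cmp*' cmd.run 'grep -E "^vcpu_pin_set" /etc/nova/nova.conf'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```
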
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index bd516f9..0cd60ba 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -15,6 +15,7 @@
roles:
- infra_kvm
- openstack_control_leader
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -27,6 +28,7 @@
roles:
- infra_kvm
- openstack_control
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -39,6 +41,7 @@
roles:
- infra_kvm
- openstack_control
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -128,7 +131,7 @@
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
- - features_lvm_backend
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -155,7 +158,10 @@
ens4:
role: single_ctl
ens5:
- role: single_ovs_br_prv
- mtu: 1500
+ role: bond0_ab_ovs_vxlan_mesh_no_tag
+ ens6:
+ role: bond0_ab_ovs_vxlan_mesh_no_tag
ens7:
- role: bond1_ab_ovs_floating
+ role: single_ovs_br_floating
+ external_address: 10.90.0.110
+ external_network_netmask: 255.255.255.0
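
On the DPDK lab's gateway, the single tagged private bridge gives way to an active-backup bond: ens5 and ens6 now both carry the untagged VXLAN tenant mesh, and ens7 drops its bond role to become the floating bridge with a static external address. Resulting layout (summarized from the hunk above; other NIC roles unchanged):

```
# gtw01 NIC layout after this change (sketch):
#   ens4        -> single_ctl (control plane)
#   ens5 + ens6 -> bond0, active-backup, untagged VXLAN mesh
#   ens7        -> single_ovs_br_floating, 10.90.0.110/255.255.255.0
```
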
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
index 8057165..5716d76 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
@@ -1,118 +1,20 @@
{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
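
The refactoring above is meant to be behavior-preserving: each removed block has a matching macro in shared-core.yaml. As a reference point, `MACRO_INSTALL_KEEPALIVED()` should render to essentially the same steps that were inlined before (sketch reconstructed from the removed lines):

```
# Sketch: expected expansion of SHARED_CORE.MACRO_INSTALL_KEEPALIVED()
- description: Install keepalived on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
```
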
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
index 69fbc1a..20b2fa6 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
@@ -14,7 +14,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -61,66 +61,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 10, delay: 30}
skip_fail: false
-
-- description: TMP step related to PROD-17975 for cmp
- cmd: salt "cmp*" cmd.run ' systemctl restart openvswitch-switch.service'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: TMP step related to PROD-17975 for cmp
- cmd: salt "gtw*" cmd.run ' systemctl restart openvswitch-switch.service'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
index dde0436..140eb8c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
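
Only the default URL changes here; `os_env(name, default)` still lets a job point the template at another model repository without editing the file:

```
# Sketch: the environment variable wins over the inline default when set, e.g.
#   export SALT_MODELS_REPOSITORY=https://gerrit.example.local/salt-models/mcp-virtual-lab
# (hypothetical URL, shown only to illustrate the override)
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY',
    'https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
```
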
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
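
These added commands bootstrap the Salt master inside cloud-init rather than in a later provisioning step. The ordering matters: the reclass nodes directory is created before salt-master starts so the master can serve its inventory, and the trailing `salt-call test.ping` acts as a readiness gate:

```
# Sketch: the bootstrap sequence added to the cfg01 user-data
- mkdir -p /srv/salt/reclass/nodes            # inventory dir the master expects
- systemctl enable salt-master
- systemctl enable salt-minion
- systemctl start salt-master
- systemctl start salt-minion
- salt-call -l info --timeout=120 test.ping   # block until the local minion answers
```
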
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
index a9f0722..1dba85e 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
@@ -489,9 +489,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 1230c56..22e4442 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -55,6 +55,9 @@
ceph_osd_rack01_backend_subnet: 10.167.4
ceph_osd_rack01_hostname: osd
ceph_osd_rack01_single_subnet: 10.167.4
+ ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
+ ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95
+ ceph_osd_backend_address_ranges: 10.167.4.94-10.167.4.95
ceph_public_network: 10.167.4.0/24
ceph_rgw_address: 10.167.4.75
ceph_rgw_hostname: rgw
@@ -78,7 +81,7 @@
control_vlan: '10'
cookiecutter_template_branch: 'proposed'
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 10.167.5.0/24
@@ -89,7 +92,7 @@
gainsight_service_enabled: 'False'
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
- gnocchi_aggregation_storage: file
+ gnocchi_aggregation_storage: ceph
infra_bond_mode: active-backup
infra_deploy_nic: eth0
infra_kvm01_control_address: 10.167.4.11
@@ -128,8 +131,11 @@
openstack_cluster_size: compact
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 10.167.4.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.11
@@ -203,9 +209,17 @@
tenant_network_gateway: 10.167.6.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.167.6.0/24
- tenant_telemetry_enabled: 'False'
+ tenant_telemetry_enabled: 'True'
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
version: proposed
vnf_onboarding_enabled: 'False'
+ openstack_telemetry_address: 172.16.10.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.86
+ openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
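
Turning `tenant_telemetry_enabled` on brings a dedicated three-node telemetry (`mdb`) cluster with it, and `gnocchi_aggregation_storage` moves from local files to the Ceph cluster this template already deploys. The added addresses follow the usual VIP-plus-members pattern (the VIP role is assumed from the naming convention):

```
# Sketch: telemetry addressing on the control network
openstack_telemetry_address: 172.16.10.83          # shared VIP
openstack_telemetry_node01_address: 172.16.10.84   # mdb01
openstack_telemetry_node02_address: 172.16.10.85   # mdb02
openstack_telemetry_node03_address: 172.16.10.86   # mdb03
gnocchi_aggregation_storage: ceph                  # metric aggregates stored in Ceph
```
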
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
index 5c6a2f8..08a3c00 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
@@ -1,118 +1,19 @@
{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
index 9745cb9..8531cc3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
@@ -9,7 +9,17 @@
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
+
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -21,62 +31,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
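
The macro order in this file encodes real dependencies: Ceph has to be healthy and wired into the OpenStack services before the control plane deploys against it, and the telemetry stack builds bottom-up from its storage. A condensed view with interpretive comments (macro internals live in shared-ceph.yaml and shared-openstack.yaml; `CELL_MAPPING=true` presumably adds nova cell_v2 host mapping, judging by the parameter name):

```
# Sketch: dependency order expressed by this template
{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}             # monitors first (quorum)
{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}              # mgr daemons
{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}  # storage and object gateway
{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}            # pools/keys for glance, cinder, nova
{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}            # coordination backend for telemetry
{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}          # metric storage (Ceph-backed here)
{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}            # event storage
{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}       # data collection
{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}             # alarming on top of Gnocchi
```
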
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
index 6c288d6..e90b99b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
index cb6b03a..b36f8be 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
@@ -12,7 +12,7 @@
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
@@ -29,6 +29,9 @@
{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -57,6 +60,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+70, -10]
@@ -82,6 +88,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+70, -10]
@@ -107,6 +116,9 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+10, -10]
@@ -132,8 +144,11 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
- dhcp: [+10, -10]
+ dhcp: [+130, +230]
groups:
- name: default
@@ -240,9 +255,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -281,9 +293,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -310,6 +319,90 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.

+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
- name: cinder
capacity: 50
format: qcow2
@@ -417,10 +510,10 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
+ - name: ceph_osd
capacity: 50
format: qcow2
- - name: ceph
+ - name: ceph_journal
capacity: 50
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -449,10 +542,10 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
+ - name: ceph_osd
capacity: 50
format: qcow2
- - name: ceph
+ - name: ceph_journal
capacity: 50
format: qcow2
- name: iso # Volume with name 'iso' will be used
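
In fuel-devops address pools the `+N` entries are offsets from the start of the pool's subnet, so the new telemetry nodes land on the .84-.86 addresses of each network. Narrowing the last pool's DHCP range keeps dynamic leases clear of the statically mapped hosts:

```
# Sketch: static offsets vs. the DHCP window within one address pool
default_{{ HOSTNAME_MDB01 }}: +84    # resolves to x.x.x.84 in this pool
default_{{ HOSTNAME_MDB02 }}: +85
default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
  dhcp: [+130, +230]                 # leases start above every static offset
```
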
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
index 74a1465..c89ec89 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
@@ -169,4 +169,37 @@
ens3:
role: single_dhcp
ens4:
- role: single_ctl
\ No newline at end of file
+ role: single_ctl
+
+ mdb01.cookied-mcp-pike-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.cookied-mcp-pike-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.cookied-mcp-pike-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
similarity index 72%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
index 505282d..0ad8daf 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
@@ -1,9 +1,12 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'True'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ cluster_domain: cookied-mcp-pike-dvr-ssl-barbican.local
+ cluster_name: cookied-mcp-pike-dvr-ssl-barbican
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -13,7 +16,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -41,7 +44,8 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
- mcp_version: stable
+ maas_enabled: 'False'
+ mcp_version: proposed
offline_deployment: 'False'
opencontrail_enabled: 'False'
openstack_benchmark_node01_address: 172.16.10.95
@@ -51,6 +55,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,7 +107,12 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
+ openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -145,36 +157,35 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_enabled: 'False'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
@@ -182,3 +193,32 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'False'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'False'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_barbican_address: 172.16.10.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 172.16.10.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 172.16.10.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 172.16.10.47
+ openstack_barbican_node03_hostname: kmn03
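
Besides introducing the kmn/Barbican nodes, this context turns on TLS for the main internal channels. The flags are related but independent; a sketch of the group with what each one is understood to cover (interpretation of the flag names, not spelled out in the diff):

```
# Sketch: the security switches enabled for this lab
barbican_backend: dogtag                 # secrets backed by a Dogtag CA
nova_vnc_tls_enabled: 'True'             # TLS for VNC console traffic
galera_ssl_enabled: 'True'               # TLS between Galera members
openstack_mysql_x509_enabled: 'True'     # x509 client auth for MySQL
rabbitmq_ssl_enabled: 'True'             # TLS for AMQP
openstack_rabbitmq_x509_enabled: 'True'  # x509 client auth for RabbitMQ
openstack_internal_protocol: 'https'     # internal API endpoints over HTTPS
```
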
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
similarity index 69%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
index 3e05cf0..f704f65 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-pike-dvr-ssl.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,17 +10,14 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -28,16 +25,14 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -45,15 +40,14 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -61,50 +55,43 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ kmn01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- linux_system_codename_xenial
interfaces:
ens3:
@@ -113,10 +100,11 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -128,7 +116,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
new file mode 100644
index 0000000..f5a0013
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
new file mode 100644
index 0000000..627ed30
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
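
Ordering note: with `barbican_backend: dogtag` set in the context, the Dogtag CA must be deployed before Barbican can configure its secret store, which is why `MACRO_INSTALL_DOGTAG()` directly precedes `MACRO_INSTALL_BARBICAN()`; computes come last, with cell mapping enabled. Condensed:

```
# Sketch: the tail of the control-plane order in this template
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}    # CA backend first
{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}  # key manager on kmn01..kmn03
{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}  # cell_v2 mapping assumed from the flag
```
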
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
new file mode 100644
index 0000000..df13ee9
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other Salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
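
The "Temp fix" step runs before `MACRO_CONFIGURE_RECLASS` so the override is already present when the model is assembled; after it executes, the system-level cinder volume class should carry the injected key (sketch of the expected result in /srv/salt/reclass/classes/system/cinder/volume/single.yml):

```
# Sketch: effect of the reclass-tools add-key call above
parameters:
  _param:
    cluster_internal_protocol: https   # cinder-volume reaches internal endpoints over TLS
```
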
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index a73ca23..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,16 +44,14 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- # Enable grub menu using updated config below
- - update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
similarity index 86%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
index 382dba4..e1befcb 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,23 +12,23 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl-barbican') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -43,11 +43,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -63,11 +63,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -83,11 +83,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+10, -10]
@@ -103,14 +103,13 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+130, +220]
groups:
- name: default
@@ -150,11 +149,10 @@
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
-
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -162,11 +160,8 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
@@ -222,9 +217,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -263,9 +255,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -292,9 +281,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -307,11 +293,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_KMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -333,11 +319,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_KMN02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -359,11 +345,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON03 }}
+ - name: {{ HOSTNAME_KMN03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -399,9 +385,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -428,6 +411,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -479,6 +468,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -494,8 +489,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
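
Note on the address layout in the hunks above: in these fuel-devops underlay templates, per-node entries such as `default_{{ HOSTNAME_KMN03 }}: +47` are offsets into an address pool's network, and `dhcp: [+130, +220]` bounds the pool's DHCP range using the same offset convention (a negative offset such as `-10` counts back from the end of the network). A minimal sketch, with a hypothetical pool name and assuming the usual `ip_reserved`/`ip_ranges` layout of these templates:

```
address_pools:
  private-pool01:
    net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
    params:
      ip_reserved:
        gateway: +1                        # network address +1, e.g. 10.60.0.1
        l2_network_device: +1
        default_{{ HOSTNAME_CFG01 }}: +90  # resolves to 10.60.0.90
      ip_ranges:
        dhcp: [+130, +220]                 # DHCP serves 10.60.0.130 .. 10.60.0.220
```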
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index bac6199..2fdfc6b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -1,4 +1,7 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'False'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
@@ -13,7 +16,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -41,16 +44,20 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
+ maas_enabled: 'False'
mcp_version: stable
offline_deployment: 'False'
opencontrail_enabled: 'False'
openstack_benchmark_node01_address: 172.16.10.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '100'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,6 +107,11 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -145,39 +157,68 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
+ fluentd_enabled: 'True'
stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
+ openstack_telemetry_address: 172.16.10.96
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.97
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.98
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.99
+ openstack_telemetry_node03_hostname: mdb03
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.1.0.0/24
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'True'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'False'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
\ No newline at end of file
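
The three new `openstack_compute_*_address_ranges` keys pin static addresses for exactly the two computes declared by `openstack_compute_count: '2'`; with `compute_padding_with_zeros: False` the generated hostnames come out unpadded (`cmp1`, `cmp2`). A hypothetical illustration of the resulting pairing (the actual parameter names are produced by the cookiecutter templates and may differ):

```
cmp1:
  single_address: 172.16.10.105   # first of openstack_compute_single_address_ranges
  deploy_address: 192.168.10.105  # first of openstack_compute_deploy_address_ranges
  tenant_address: 10.1.0.105      # first of openstack_compute_tenant_address_ranges
cmp2:
  single_address: 172.16.10.106
  deploy_address: 192.168.10.106
  tenant_address: 10.1.0.106
```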
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
index 327788e..83998a7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.cookied-mcp-pike-dvr-ssl.local:
+ cfg01.mcp-pike-dvr-ssl.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -8,125 +8,181 @@
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- ctl01.cookied-mcp-pike-dvr-ssl.local:
+ ctl01.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- ctl02.cookied-mcp-pike-dvr-ssl.local:
+ ctl02.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- ctl03.cookied-mcp-pike-dvr-ssl.local:
+ ctl03.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- prx01.cookied-mcp-pike-dvr-ssl.local:
+ prx01.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- mon01.cookied-mcp-pike-dvr-ssl.local:
+ mon01.mcp-pike-dvr-ssl.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
- - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
- stacklight_log_leader_v2
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- mon02.cookied-mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node02
+ log02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_log_node02
roles:
- - stacklightv2_server
- - stacklight_telemetry
- stacklight_log
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
- mon03.cookied-mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node03
+ log03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_log_node03
roles:
- - stacklightv2_server
- - stacklight_telemetry
- stacklight_log
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
+ role: single_ctl
+
+ mtr01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.cookied-mcp-pike-dvr-ssl.local:
+ cmp<<count>>.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
+ role: single_ctl
ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
+ role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
- gtw01.cookied-mcp-pike-dvr-ssl.local:
+ gtw01.mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -135,42 +191,41 @@
ens3:
role: single_dhcp
ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
+ role: single_ctl
ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
+ role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
- dns01.cookied-mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_dns_node01
+ mdb01.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node01
roles:
- - features_designate_pool_manager_dns
- linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
+ - openstack_telemetry
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
- single_address: ${_param:openstack_dns_node01_address}
+ role: single_ctl
- dns02.cookied-mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_dns_node02
+ mdb02.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node02
roles:
- - features_designate_pool_manager_dns
- linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
+ - openstack_telemetry
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_vlan_ctl
- single_address: ${_param:openstack_dns_node02_address}
+ role: single_ctl
+
+ mdb03.mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
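
The `cmp<<count>>.mcp-pike-dvr-ssl.local` entry above is a generator: the environment manager expands it into one node definition per compute. As a sketch only (the expanded hostname form and storage-name handling are assumptions), the two-compute case is roughly equivalent to writing:

```
cmp1.mcp-pike-dvr-ssl.local:
  reclass_storage_name: openstack_compute_rack01
  roles:
    - openstack_compute
    - features_lvm_backend_volume_vdb
    - linux_system_codename_xenial
  interfaces:
    ens3:
      role: single_dhcp
    ens4:
      role: single_ctl
    ens5:
      role: bond0_ab_ovs_vxlan_mesh
    ens6:
      role: bond1_ab_ovs_floating
```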
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
index ded1586..f3d274a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
@@ -1,117 +1,19 @@
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
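
The rewritten core.yaml above delegates to macros imported from shared-core.yaml; each macro emits the same kind of step list that was previously written inline. A sketch of the pattern (the real macro bodies live in tcp_tests/templates/shared-core.yaml; the body below simply restates the two keepalived steps this change removed):

```
{%- macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{%- endmacro %}
```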
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
index 5a0f780..c1e32ec 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
@@ -3,291 +3,40 @@
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
# Install OpenStack control services
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-- description: Nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-# install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
-# Install compute node
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
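
Unlike the parameterless macros in core.yaml, the OpenStack macros above take keyword arguments — `MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true)`, `MACRO_INSTALL_CINDER(INSTALL_VOLUME=true)`, `MACRO_INSTALL_COMPUTE(CELL_MAPPING=true)` — which gate optional steps inside shared-openstack.yaml. A hypothetical sketch of that pattern (the step bodies here are invented for illustration; the real ones live in shared-openstack.yaml):

```
{%- macro MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) %}
- description: Install cinder controller services
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:controller' state.sls cinder -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- if INSTALL_VOLUME %}
- description: Install cinder volume services
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:volume' state.sls cinder
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 5}
  skip_fail: false
{%- endif %}
{%- endmacro %}
```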
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
index d2c5733..c67a1ac 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,33 +14,30 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+- description: "Temp fix"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Hack gtw node
- cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Hack cmp01 node
- cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Hack cmp02 node
- cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
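
The "Temp fix" step added above installs reclass-tools into a throwaway virtualenv and injects a single pillar key so that cinder volume services use the in-cluster TLS endpoints. Its net effect on the targeted class file is just this (sketch of the resulting YAML):

```
# /srv/salt/reclass/classes/system/cinder/volume/single.yml after the step:
parameters:
  _param:
    cluster_internal_protocol: https
```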
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index 4f3d9bc..07cfef8 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -1,198 +1,24 @@
{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influx db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Launch containers
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+
{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
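
The cfg01 user-data hunk above appends salt bootstrap commands to the same command list that already contains the resolv.conf line, so cloud-init enables and starts the master and minion services and then blocks until the local minion answers. Condensed view of the resulting list (shown under `runcmd` for clarity; the actual section name in the template is not visible in this hunk):

```
runcmd:
  - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
  - mkdir -p /srv/salt/reclass/nodes
  - systemctl enable salt-master
  - systemctl enable salt-minion
  - systemctl start salt-master
  - systemctl start salt-minion
  - salt-call -l info --timeout=120 test.ping   # wait for the minion to respond
```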
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
index 4c8efd8..26456f7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -13,18 +13,28 @@
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -43,11 +53,21 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -63,11 +83,21 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -83,11 +113,21 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -103,13 +143,23 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
- dhcp: [+10, -10]
+ dhcp: [+130, +220]
groups:
@@ -150,7 +200,7 @@
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -219,9 +269,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -260,9 +307,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -289,6 +333,90 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
- name: cinder
capacity: 50
format: qcow2
@@ -307,8 +435,8 @@
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -333,8 +461,8 @@
- name: {{ HOSTNAME_MON02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -359,8 +487,164 @@
- name: {{ HOSTNAME_MON03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -386,7 +670,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -396,9 +680,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -426,6 +707,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -477,6 +764,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index a4c8abf..64031ea 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,6 +103,10 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
+ designate_backend: bind
+ designate_enabled: 'True'
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node02_address: 172.16.10.114
openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -140,12 +147,41 @@
7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
-----END RSA PRIVATE KEY-----
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
stacklight_enabled: 'True'
fluentd_enabled: 'True'
stacklight_log_address: 172.16.10.60
@@ -181,3 +217,19 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ manila_enabled: 'True'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_octavia_enabled: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
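The Octavia parameters added above carve out a dedicated load-balancer
management network. A minimal consistency check, assuming only the values
copied from the context above and Python's stdlib ipaddress module (this
script is illustrative, not part of tcp-qa):

```
import ipaddress

# Values from the cookiecutter context above.
cidr = ipaddress.ip_network("192.168.1.0/24")        # octavia_lb_mgmt_cidr
hm_bind_ip = ipaddress.ip_address("192.168.1.12")    # octavia_hm_bind_ip
pool_start = ipaddress.ip_address("192.168.1.2")     # allocation_pool_start
pool_end = ipaddress.ip_address("192.168.1.200")     # allocation_pool_end

# The health-manager bind IP and the whole allocation pool must sit
# inside the management CIDR, and the bind IP inside the pool.
assert hm_bind_ip in cidr
assert pool_start in cidr and pool_end in cidr
assert pool_start <= hm_bind_ip <= pool_end
print("octavia mgmt network parameters are consistent")
```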
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 0f806cf..f1ba914 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-pike-dvr.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,16 +10,14 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-pike-dvr.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
-# - features_designate_pool_manager_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -27,15 +25,14 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-pike-dvr.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -43,15 +40,14 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-pike-dvr.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -59,11 +55,10 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-pike-dvr.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
-# - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -71,7 +66,7 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
+ mon01.mcp-pike-dvr.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -82,7 +77,7 @@
ens4:
role: single_ctl
- mon02.mcp11-ovs-dpdk.local:
+ mon02.mcp-pike-dvr.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -93,7 +88,7 @@
ens4:
role: single_ctl
- mon03.mcp11-ovs-dpdk.local:
+ mon03.mcp-pike-dvr.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -104,7 +99,7 @@
ens4:
role: single_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-pike-dvr.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -115,7 +110,7 @@
ens4:
role: single_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-pike-dvr.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -126,7 +121,7 @@
ens4:
role: single_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-pike-dvr.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -137,7 +132,7 @@
ens4:
role: single_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mtr01.mcp-pike-dvr.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -148,7 +143,7 @@
ens4:
role: single_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-pike-dvr.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -159,7 +154,7 @@
ens4:
role: single_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-pike-dvr.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -171,10 +166,11 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-pike-dvr.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -186,10 +182,9 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-pike-dvr.local:
reclass_storage_name: openstack_gateway_node01
roles:
- - openstack_gateway
- linux_system_codename_xenial
interfaces:
ens3:
@@ -201,36 +196,35 @@
ens6:
role: bond1_ab_ovs_floating
-# dns01.mcp11-ovs-dpdk.local:
-# reclass_storage_name: openstack_dns_node01
-# roles:
-# - features_designate_pool_manager_dns
-# - linux_system_codename_xenial
-# classes:
-# - system.linux.system.repo.mcp.extra
-# - system.linux.system.repo.mcp.apt_mirantis.openstack
-# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-# - system.linux.system.repo.mcp.apt_mirantis.saltstack
-# interfaces:
-# ens3:
-# role: single_dhcp
-# ens4:
-# role: single_ctl
-# single_address: ${_param:openstack_dns_node01_address}
-#
-# dns02.mcp11-ovs-dpdk.local:
-# reclass_storage_name: openstack_dns_node02
-# roles:
-# - features_designate_pool_manager_dns
-# - linux_system_codename_xenial
-# classes:
-# - system.linux.system.repo.mcp.extra
-# - system.linux.system.repo.mcp.apt_mirantis.openstack
-# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-# - system.linux.system.repo.mcp.apt_mirantis.saltstack
-# interfaces:
-# ens3:
-# role: single_dhcp
-# ens4:
-# role: single_ctl
-# single_address: ${_param:openstack_dns_node02_address}
+ share01.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_share_node01
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns01.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
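The environment context above maps each node FQDN to a reclass storage name
and a list of roles. A small reader sketch, assuming PyYAML and a rendered
(non-templated) copy of the file; the path and the grouping are illustrative:

```
import collections

import yaml  # PyYAML

with open("_context-environment.yaml") as f:
    data = yaml.safe_load(f)

# Group node FQDNs by role, mirroring the roles: lists above.
by_role = collections.defaultdict(list)
for fqdn, node in data["nodes"].items():
    for role in node.get("roles", []):
        by_role[role].append(fqdn)

for role in sorted(by_role):
    print(f"{role}: {', '.join(sorted(by_role[role]))}")
```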
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
index 6e69e30..a39d636 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
@@ -1,119 +1,21 @@
{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
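The core.yaml rewrite above drops roughly a hundred lines of inline salt
steps in favour of macros imported from shared-core.yaml. A toy jinja2
illustration of the import-and-expand mechanism (the macro body here is
fake; only the pattern matches the templates above):

```
from jinja2 import Environment, DictLoader

templates = {
    # Stand-in for shared-core.yaml: one macro that emits a step.
    "shared-core.yaml": (
        "{% macro MACRO_INSTALL_KEEPALIVED() %}"
        "- description: Install keepalived\n"
        "  cmd: salt -C 'I@keepalived:cluster' state.sls keepalived\n"
        "{% endmacro %}"
    ),
    # Stand-in for core.yaml: import the shared macros and expand one.
    "core.yaml": (
        "{% import 'shared-core.yaml' as SHARED_CORE with context %}\n"
        "{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}"
    ),
}

env = Environment(loader=DictLoader(templates))
print(env.get_template("core.yaml").render())
```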
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 21ffdc3..59e85e3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -35,123 +35,29 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endif %}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-# isntall designate
-#- description: Install powerdns
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@powerdns:server' state.sls powerdns.server
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-# Install compute node
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_API() }}
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_MANAGER() }}
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
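Every step in these openstack.yaml files, kept or removed, follows one
schema: a cmd run on node_name, a retry block with count and delay, and a
skip_fail flag. A toy model of that contract (not tcp-qa's actual runner;
the sample step is hypothetical):

```
import subprocess
import time

def run_step(step: dict) -> None:
    """Run step['cmd'] up to retry.count times, delay seconds apart."""
    retry = step.get("retry", {})
    count, delay = retry.get("count", 1), retry.get("delay", 0)
    for attempt in range(1, count + 1):
        if subprocess.run(step["cmd"], shell=True).returncode == 0:
            return
        if attempt < count:
            time.sleep(delay)
    # skip_fail: true means a failing step does not abort the deployment.
    if not step.get("skip_fail", False):
        raise RuntimeError("step failed: " + step["description"])

run_step({
    "description": "sync time",
    "cmd": "true",  # placeholder command
    "retry": {"count": 3, "delay": 1},
    "skip_fail": False,
})
```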
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
index e640c6d..c5a459f 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
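The os_env(name, default) calls in these salt.yaml templates resolve to the
environment variable when it is set and to the default otherwise. A rough
Python equivalent of that lookup:

```
import os

def os_env(name, default=None):
    """Return the environment variable's value, or the given default."""
    return os.environ.get(name, default)

repo = os_env("SALT_MODELS_REPOSITORY",
              "https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab")
print(repo)
```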
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
index f135f1e..32ec67d 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -30,8 +30,9 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
template:
@@ -61,9 +62,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -89,9 +91,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -117,9 +120,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -145,9 +149,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -259,9 +264,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -300,9 +302,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -329,9 +328,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -592,9 +588,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -622,6 +615,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -673,6 +672,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -711,54 +716,80 @@
interfaces: *all_interfaces
network_config: *all_network_config
-# - name: {{ HOSTNAME_DNS01 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: mcp_ubuntu_1604_image
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *all_interfaces
-# network_config: *all_network_config
-#
-# - name: {{ HOSTNAME_DNS02 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: mcp_ubuntu_1604_image
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *all_interfaces
-# network_config: *all_network_config
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                         # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                         # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                         # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
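The +113/+114/+204 entries in the address pools above are offsets from each
pool's network address; on a 172.16.10.0/24 control pool they line up with
openstack_dns_node01_address, openstack_dns_node02_address and
openstack_share_node01_address from the cookiecutter context. A quick
sketch (the subnet is illustrative):

```
import ipaddress

pool = ipaddress.ip_network("172.16.10.0/24")
offsets = {"dns01": 113, "dns02": 114, "prx01": 121, "share01": 204}

for host, off in sorted(offsets.items(), key=lambda kv: kv[1]):
    # e.g. dns01: 172.16.10.113
    print(f"{host}: {pool.network_address + off}")
```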
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index cd29897..80ca7f6 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -51,6 +51,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,6 +103,10 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
+ designate_backend: powerdns
+ designate_enabled: 'True'
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node02_address: 172.16.10.114
openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -145,7 +152,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.60
@@ -181,3 +188,12 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ manila_enabled: 'True'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 4c7091b..d57ceaf 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-pike-ovs.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,17 +10,14 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-pike-ovs.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9_dns
- # - features_designate_bind9
- # - features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -28,16 +25,14 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-pike-ovs.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9_dns
- # - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -45,15 +40,14 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-pike-ovs.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -61,11 +55,10 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-pike-ovs.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- # - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -73,7 +66,7 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
+ mon01.mcp-pike-ovs.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -84,7 +77,7 @@
ens4:
role: single_ctl
- mon02.mcp11-ovs-dpdk.local:
+ mon02.mcp-pike-ovs.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -95,7 +88,7 @@
ens4:
role: single_ctl
- mon03.mcp11-ovs-dpdk.local:
+ mon03.mcp-pike-ovs.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -106,7 +99,7 @@
ens4:
role: single_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-pike-ovs.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -117,7 +110,7 @@
ens4:
role: single_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-pike-ovs.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -128,7 +121,7 @@
ens4:
role: single_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-pike-ovs.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -139,7 +132,7 @@
ens4:
role: single_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mtr01.mcp-pike-ovs.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -150,7 +143,7 @@
ens4:
role: single_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-pike-ovs.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -161,7 +154,7 @@
ens4:
role: single_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-pike-ovs.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -173,10 +166,11 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-pike-ovs.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -188,7 +182,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-pike-ovs.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -202,3 +196,36 @@
role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
+
+ dns01.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share01.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_share_node01
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
index 78acdf1..06946d4 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
@@ -1,119 +1,20 @@
{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
index 8eb1d57..de8e65e 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -14,123 +14,22 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-# isntall designate
-#- description: Install bind
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@bind:server' state.sls bind
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) }}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
index 3fd0958..9c13b64 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -28,21 +28,3 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Hack gtw node
- cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Hack cmp01 node
- cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Hack cmp02 node
- cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
index 89cf22b..d1c83dd 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -30,7 +30,10 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -59,7 +62,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -85,7 +91,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -111,7 +120,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -137,7 +149,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -249,9 +264,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -290,9 +302,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -319,9 +328,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -582,9 +588,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -611,6 +614,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -662,6 +671,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -699,3 +714,81 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                         # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                         # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
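Note on the address-pool syntax used in these underlay templates: entries such as `default_{{ HOSTNAME_SHARE01 }}: +204` reserve an address at a fixed offset from the pool's network address, and `dhcp: [+90, -10]` bounds the DHCP range between offsets counted from the start and from the end of the pool. A minimal Python sketch of that resolution (illustrative only, not the fuel-devops implementation; it assumes negative offsets count back from the broadcast address):

```python
import ipaddress

def resolve_offset(net: ipaddress.IPv4Network, offset: str) -> ipaddress.IPv4Address:
    """'+N' counts forward from the network address, '-N' back from broadcast."""
    n = int(offset)
    base = net.network_address if n >= 0 else net.broadcast_address
    return base + n

pool = ipaddress.ip_network("10.70.0.0/24")
print(resolve_offset(pool, "+204"))  # 10.70.0.204 -> share01
print(resolve_offset(pool, "+90"))   # first DHCP address in [+90, -10]
print(resolve_offset(pool, "-10"))   # last DHCP address: 10.70.0.245
```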
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
new file mode 100644
index 0000000..bfcc3fd
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -0,0 +1,239 @@
+default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'False'
+ auditd_enabled: 'True'
+ backend_network_netmask: 255.255.255.0
+ backend_network_subnet: 10.167.4.0/24
+ backend_vlan: '10'
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
+ rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
+ 9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
+ qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
+ Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
+ 178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
+ d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
+ MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
+ 6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
+ sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
+ H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
+ EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
+ zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
+ fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
+ HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
+ x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
+ +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
+ UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
+ 7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
+ eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
+ mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
+ km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
+ 9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
+ OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
+ CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
+ bmk_enabled: 'False'
+ ceph_cluster_network: 10.167.4.0/24
+ ceph_enabled: 'True'
+ ceph_hyper_converged: 'False'
+ ceph_mon_node01_address: 10.167.4.66
+ ceph_mon_node01_hostname: cmn01
+ ceph_mon_node02_address: 10.167.4.67
+ ceph_mon_node02_hostname: cmn02
+ ceph_mon_node03_address: 10.167.4.68
+ ceph_mon_node03_hostname: cmn03
+ ceph_osd_backend: bluestore
+ ceph_osd_block_db_size: '10'
+ ceph_osd_bond_mode: active-backup
+ ceph_osd_count: '2'
+ ceph_osd_data_disks: /dev/vdb
+ ceph_osd_journal_or_block_db_disks: /dev/vdc
+ ceph_osd_node_count: '2'
+ ceph_osd_journal_size: '10'
+ ceph_osd_primary_first_nic: eth1
+ ceph_osd_primary_second_nic: eth2
+ ceph_osd_rack01_backend_subnet: 10.167.4
+ ceph_osd_rack01_hostname: osd
+ ceph_osd_rack01_single_subnet: 10.167.4
+ ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
+ ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95
+ ceph_osd_backend_address_ranges: 10.167.4.94-10.167.4.95
+ ceph_public_network: 10.167.4.0/24
+ ceph_rgw_address: 10.167.4.75
+ ceph_rgw_hostname: rgw
+ ceph_rgw_node01_address: 10.167.4.76
+ ceph_rgw_node01_hostname: rgw01
+ ceph_rgw_node02_address: 10.167.4.77
+ ceph_rgw_node02_hostname: rgw02
+ ceph_rgw_node03_address: 10.167.4.78
+ ceph_rgw_node03_hostname: rgw03
+ ceph_version: luminous
+ cicd_enabled: 'False'
+ cluster_domain: cookied-mcp-queens-dvr-ceph.local
+ cluster_name: cookied-mcp-queens-dvr-ceph
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: 'proposed'
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: obutenko@mirantis.com
+ gainsight_service_enabled: 'False'
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.11
+ infra_kvm01_deploy_address: 10.167.5.11
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.12
+ infra_kvm02_deploy_address: 10.167.5.12
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.13
+ infra_kvm03_deploy_address: 10.167.5.13
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.10
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kubernetes_ctl_on_kvm: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_deploy_network_name: deploy_network
+ maas_deploy_range_end: 10.167.5.230
+ maas_deploy_range_start: 10.167.5.20
+ maas_deploy_vlan: '0'
+ maas_enabled: 'False'
+ maas_fabric_name: deploy_fabric
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: 'proposed'
+ mcp_version: proposed
+ no_platform: 'False'
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 10.167.4.111
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 10.167.4.112
+ openstack_dns_node02_hostname: dns02
+ openstack_control_address: 10.167.4.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.10
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 10.167.4.11
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 10.167.4.12
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 10.167.4.13
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.4.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.6.6
+ openstack_gateway_node02_address: 10.167.4.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.7
+ openstack_gateway_node03_address: 10.167.4.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.6.8
+ openstack_message_queue_address: 10.167.4.10
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 10.167.4.11
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 10.167.4.12
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 10.167.4.13
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.4.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.4.19
+ openstack_version: queens
+ osd_padding_with_zeros: 'False'
+ oss_enabled: 'False'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
+ salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+ sriov_network_subnet: 10.55.0.0/16
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ version: proposed
+ vnf_onboarding_enabled: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'False'
+ galera_ssl_enabled: 'False'
+ openstack_mysql_x509_enabled: 'False'
+ rabbitmq_ssl_enabled: 'False'
+ openstack_rabbitmq_x509_enabled: 'False'
+ tenant_telemetry_enabled: 'True'
+ gnocchi_aggregation_storage: ceph
+ openstack_telemetry_address: 10.167.4.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.4.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.4.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.4.86
+ openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
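The `ceph_osd_*_address_ranges` values in this context pair with `ceph_osd_count: '2'`: each `start-end` string expands into one address per generated OSD node, matching the `+94`/`+95` reservations in the underlay. A rough sketch of that expansion (illustration only, not the generator code):

```python
import ipaddress

def expand_range(spec: str) -> list:
    """Expand 'a.b.c.d-a.b.c.e' into the individual addresses, inclusive."""
    start_s, end_s = spec.split("-")
    start, end = ipaddress.ip_address(start_s), ipaddress.ip_address(end_s)
    return [start + i for i in range(int(end) - int(start) + 1)]

print(expand_range("10.167.4.94-10.167.4.95"))  # one address each for osd1, osd2
```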
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
new file mode 100644
index 0000000..fcce951
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
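For context: files like core.yaml are thin wrappers; each `MACRO_INSTALL_*` call is a Jinja2 macro defined in the shared template that renders a list of deployment steps. A self-contained sketch of the mechanism (the macro body below is invented for illustration; the real macros live in tcp_tests/templates/shared-core.yaml):

```python
import jinja2

SHARED = """
{% macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed 'ctl*' state.sls keepalived
{% endmacro %}
"""

env = jinja2.Environment(loader=jinja2.DictLoader({
    "shared-core.yaml": SHARED,
    "core.yaml": ("{% import 'shared-core.yaml' as SHARED_CORE with context %}"
                  "{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}"),
}))
print(env.get_template("core.yaml").render())  # emits the step list above
```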
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
new file mode 100644
index 0000000..636187b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -0,0 +1,46 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
+
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..dd8fd17
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
@@ -0,0 +1,30 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
index a73ca23..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -44,16 +44,14 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- # Enable grub menu using updated config below
- - update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
similarity index 69%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
index da7908d..979424f 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
@@ -25,13 +25,13 @@
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
+ - export TERM=linux
+ - export LANG=C
# Configure dhclient
- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
# Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
@@ -40,22 +40,10 @@
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
write_files:
- path: /etc/network/interfaces
content: |
auto ens3
iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
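Because several cloud-init fragments are trimmed in hunks like the one above, a quick way to confirm a user-data template still parses and keeps the sections the underlay relies on is a YAML round-trip (a sanity-check sketch, not part of the test suite):

```python
import yaml

user_data = """\
runcmd:
  - export TERM=linux
  - sudo ifup ens3
write_files:
  - path: /etc/network/interfaces
    content: |
      auto ens3
      iface ens3 inet dhcp
"""

doc = yaml.safe_load(user_data)
assert {"runcmd", "write_files"} <= doc.keys()  # sections the templates expect
print(doc["write_files"][0]["path"])            # /etc/network/interfaces
```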
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
similarity index 60%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
index de5427a..fe31142 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
@@ -1,9 +1,8 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,25 +11,31 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ceph') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -39,21 +44,27 @@
ip_reserved:
gateway: +1
l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+70, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -61,21 +72,27 @@
ip_reserved:
gateway: +1
l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+70, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -83,19 +100,25 @@
ip_reserved:
gateway: +1
l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
dhcp: [+10, -10]
@@ -105,22 +128,27 @@
ip_reserved:
gateway: +1
l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CFG01 }}: +15
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_OSD01 }}: +94
+ default_{{ HOSTNAME_OSD02 }}: +95
+ default_{{ HOSTNAME_CMN01 }}: +96
+ default_{{ HOSTNAME_CMN02 }}: +97
+ default_{{ HOSTNAME_CMN03 }}: +98
+ default_{{ HOSTNAME_RGW01 }}: +76
+ default_{{ HOSTNAME_RGW02 }}: +77
+ default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_MDB01 }}: +84
+ default_{{ HOSTNAME_MDB02 }}: +85
+ default_{{ HOSTNAME_MDB03 }}: +86
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+130, +230]
groups:
- name: default
@@ -128,7 +156,6 @@
name: devops.driver.libvirt
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: False
hpet: False
enable_acpi: true
@@ -164,7 +191,6 @@
forward:
mode: route
-
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -172,11 +198,8 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
@@ -232,9 +255,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -273,9 +293,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -302,9 +319,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -314,14 +328,16 @@
cloudinit_meta_data: *cloudinit_meta_data
cloudinit_user_data: *cloudinit_user_data_1604
+
+
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_CMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -329,7 +345,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -343,62 +359,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_MDB01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
@@ -424,6 +388,282 @@
interfaces: *interfaces
network_config: *network_config
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMN02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMN03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_OSD01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: ceph_osd
+ capacity: 50
+ format: qcow2
+ - name: ceph_journal
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_OSD02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: ceph_osd
+ capacity: 50
+ format: qcow2
+ - name: ceph_journal
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_RGW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_RGW02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+ - name: {{ HOSTNAME_RGW03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
@@ -505,8 +745,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -527,55 +767,3 @@
interfaces: *all_interfaces
network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
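The `!os_env VAR, default` tag that appears throughout these node definitions is a custom YAML constructor that falls back to a default when the environment variable is unset. A minimal sketch of such a constructor (an assumption about its behaviour, for illustration; the real implementation ships with fuel-devops):

```python
import os
import yaml

def os_env_constructor(loader, node):
    """Resolve '!os_env VAR, default' against the process environment."""
    parts = [p.strip() for p in loader.construct_scalar(node).split(",", 1)]
    var = parts[0]
    default = parts[1] if len(parts) > 1 else None
    return os.environ.get(var, default)

yaml.SafeLoader.add_constructor("!os_env", os_env_constructor)

print(yaml.safe_load("capacity: !os_env NODE_VOLUME_SIZE, 150"))
# {'capacity': '150'} unless NODE_VOLUME_SIZE is set in the environment
```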
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
new file mode 100644
index 0000000..28831da
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
@@ -0,0 +1,206 @@
+nodes:
+ cfg01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ osd<<count>>.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn02.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn03.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw02.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw03.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+
+ mdb01.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.cookied-mcp-queens-dvr-ceph.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
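The `cmp<<count>>` and `osd<<count>>` entries above are generator templates: a single definition fans out into `openstack_compute_count`/`ceph_osd_count` nodes, and `compute_padding_with_zeros: False` in the cookiecutter context explains the `cmp1`/`cmp2` (rather than `cmp001`-style) hostnames in underlay.yaml. A sketch of the naming convention (this mirrors the observed behaviour; the real expansion happens in the cookiecutter tooling):

```python
def expand_nodes(prefix: str, count: int, pad_with_zeros: bool) -> list:
    """Generate node name parts the way <<count>> templates fan out."""
    width = 3 if pad_with_zeros else 0
    return [f"{prefix}{i:0{width}d}" for i in range(1, count + 1)]

print(expand_nodes("cmp", 2, pad_with_zeros=False))  # ['cmp1', 'cmp2']
print(expand_nodes("cmp", 2, pad_with_zeros=True))   # ['cmp001', 'cmp002']
```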
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
similarity index 72%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
index 505282d..1cec753 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
@@ -1,9 +1,12 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'True'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ cluster_domain: cookied-mcp-queens-dvr-ssl-barbican.local
+ cluster_name: cookied-mcp-queens-dvr-ssl-barbican
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -13,7 +16,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -41,7 +44,8 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
- mcp_version: stable
+ maas_enabled: 'False'
+ mcp_version: proposed
offline_deployment: 'False'
opencontrail_enabled: 'False'
openstack_benchmark_node01_address: 172.16.10.95
@@ -51,6 +55,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,7 +107,12 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
+ openstack_version: queens
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -145,36 +157,35 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_enabled: 'False'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
@@ -182,3 +193,32 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'False'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'False'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_barbican_address: 172.16.10.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 172.16.10.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 172.16.10.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 172.16.10.47
+ openstack_barbican_node03_hostname: kmn03
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
similarity index 69%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
index 3e05cf0..7102e9c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-queens-dvr-ssl.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,17 +10,14 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-queens-dvr-ssl.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -28,16 +25,14 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-queens-dvr-ssl.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -45,15 +40,14 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-queens-dvr-ssl.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -61,50 +55,43 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ kmn01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp-queens-dvr-ssl.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- linux_system_codename_xenial
interfaces:
ens3:
@@ -113,10 +100,11 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-queens-dvr-ssl.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -128,7 +116,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-queens-dvr-ssl.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
new file mode 100644
index 0000000..4efe25c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
new file mode 100644
index 0000000..5a2bdac
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
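
Note: DOGTAG is installed ahead of Barbican above because it serves as the Barbican secret-store backend (barbican_backend: dogtag in this lab's context). A minimal smoke test for the resulting deployment, assuming the python-barbicanclient CLI plugin is available on a controller and the usual /root/keystonercv3 credentials file exists (both are assumptions, not part of this change):

```
# Store and list a throwaway secret through Barbican; names are illustrative.
. /root/keystonercv3
openstack secret store --name demo-secret --payload 'not-a-real-secret'
openstack secret list
```
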
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
new file mode 100644
index 0000000..edfadef
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
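
The "Temp fix" step above is worth unpacking: reclass-tools add-key injects a key/value pair into an existing YAML file by dotted path, so after it runs, the system-level cinder volume class pins internal endpoints to HTTPS. A sketch of the effective result (the exact surrounding layout of single.yml is an assumption):

```
# What the add-key call amounts to, shown as the resulting YAML fragment:
#
#   parameters:
#     _param:
#       cluster_internal_protocol: https
#
# i.e. equivalent to re-running the same command manually on cfg01:
. /root/venv-reclass-tools/bin/activate
reclass-tools add-key parameters._param.cluster_internal_protocol 'https' \
    /srv/salt/reclass/classes/system/cinder/volume/single.yml
```
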
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index a73ca23..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,16 +44,14 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- # Enable grub menu using updated config below
- - update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
similarity index 86%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
index 382dba4..c7dd479 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,23 +12,23 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ssl-barbican') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
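
The DOMAIN_NAME change above is a behavior fix, not just a rename: previously '.local' was concatenated outside os_env(), so it was appended even when DOMAIN_NAME was set explicitly in the environment; now the suffix only lives inside the default. A shell analogue of the two forms (os_env behaves like ${VAR:-default}):

```
LAB_CONFIG_NAME=cookied-mcp-queens-dvr-ssl-barbican
DOMAIN_NAME=example.org                          # explicitly set by the user

old="${DOMAIN_NAME:-$LAB_CONFIG_NAME}.local"     # example.org.local  (suffix forced)
new="${DOMAIN_NAME:-${LAB_CONFIG_NAME}.local}"   # example.org        (respected as-is)
echo "$old"; echo "$new"
```
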
@@ -43,11 +43,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -63,11 +63,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+90, -10]
@@ -83,11 +83,11 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_KMN01 }}: +45
+ default_{{ HOSTNAME_KMN02 }}: +46
+ default_{{ HOSTNAME_KMN03 }}: +47
ip_ranges:
dhcp: [+10, -10]
@@ -103,14 +103,10 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+10, -10]
-
+ dhcp: [+130, +220]
groups:
- name: default
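
For readers new to these templates: the +NN values in the address pools are offsets relative to each pool's network address, and dhcp: [+130, +220] bounds the DHCP range the same way (negative offsets count back from the end of the pool). A sketch of the arithmetic for an illustrative /24 pool; the real subnets are allocated by fuel-devops at runtime:

```
pool_base="192.168.10.0"       # illustrative /24 pool base
prefix="${pool_base%.*}"
offset=110                     # e.g. default_{{ HOSTNAME_GTW01 }}: +110

echo "gtw01      -> ${prefix}.${offset}"            # 192.168.10.110
echo "dhcp range -> ${prefix}.130 - ${prefix}.220"  # dhcp: [+130, +220]
```
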
@@ -150,11 +146,10 @@
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
-
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -162,11 +157,8 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
@@ -222,9 +214,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -263,9 +252,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -292,9 +278,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -307,11 +290,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_KMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -333,11 +316,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_KMN02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -359,11 +342,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON03 }}
+ - name: {{ HOSTNAME_KMN03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -399,9 +382,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -428,6 +408,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -479,6 +465,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -494,8 +486,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
similarity index 72%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
index 505282d..dcc8bc5 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
@@ -1,9 +1,12 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'False'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-ocata-dvr.local
- cluster_name: virtual-mcp-ocata-dvr
+ cluster_domain: cookied-mcp-queens-dvr-ssl.local
+ cluster_name: cookied-mcp-queens-dvr-ssl
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -13,7 +16,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -41,6 +44,7 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
+ maas_enabled: 'False'
mcp_version: stable
offline_deployment: 'False'
opencontrail_enabled: 'False'
@@ -51,6 +55,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,7 +107,12 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
+ openstack_version: queens
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_webhook_app_id: '24'
@@ -145,40 +157,68 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
+ openstack_telemetry_address: 172.16.10.96
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 172.16.10.97
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 172.16.10.98
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 172.16.10.99
+ openstack_telemetry_node03_hostname: mdb03
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.1.0.0/24
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'False'
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'True'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'False'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
\ No newline at end of file
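
The SSL flags added above (nova_vnc_tls_enabled, galera_ssl_enabled, rabbitmq_ssl_enabled, openstack_internal_protocol: 'https') are what distinguish this lab. A quick hand-check of the resulting endpoints, assuming the usual service ports and using openstack_control_address (172.16.10.100) from this context; the -starttls mysql probe needs OpenSSL 1.1.1 or newer:

```
# RabbitMQ over AMQPS (direct TLS on 5671):
openssl s_client -connect 172.16.10.100:5671 </dev/null
# Galera/MySQL (STARTTLS; requires a recent openssl):
openssl s_client -connect 172.16.10.100:3306 -starttls mysql </dev/null
# Keystone internal API over HTTPS (default port assumed):
curl -ks https://172.16.10.100:5000/v3 | head -c 200; echo
```
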
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
new file mode 100644
index 0000000..b1c7e3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
@@ -0,0 +1,231 @@
+nodes:
+ cfg01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_lvm_backend_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_vdb
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ mdb01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mdb03.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
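
This new environment file only maps hostnames to reclass storage names, roles, and interface roles; the heavy lifting happens when salt.yaml renders the model. Once that has run on cfg01, the generated inventory can be sanity-checked with the reclass CLI (a sketch, assuming the tcp-qa convention of /srv/salt/reclass as the base path):

```
reclass -b /srv/salt/reclass --inventory | head -n 40
reclass -b /srv/salt/reclass --nodeinfo ctl01.mcp-queens-dvr-ssl.local
```
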
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
new file mode 100644
index 0000000..e10bccc
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
new file mode 100644
index 0000000..278b78b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
@@ -0,0 +1,42 @@
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/overrides-policy.yml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl/overrides-policy.yml
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
new file mode 100644
index 0000000..eaf8a1f
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
new file mode 100644
index 0000000..7d65097
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
@@ -0,0 +1,24 @@
+{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
+
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
index a73ca23..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,16 +44,14 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- # Enable grub menu using updated config below
- - update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
similarity index 61%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
index de5427a..f6a8998 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,25 +12,33 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ssl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -45,13 +53,21 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -67,13 +83,21 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -89,13 +113,21 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -111,15 +143,23 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_MDB01 }}: +97
+ default_{{ HOSTNAME_MDB02 }}: +98
+ default_{{ HOSTNAME_MDB03 }}: +99
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
- dhcp: [+10, -10]
+ dhcp: [+130, +220]
groups:
@@ -160,7 +200,7 @@
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -172,11 +212,8 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
nodes:
@@ -232,9 +269,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -273,9 +307,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -302,6 +333,90 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MDB03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
- name: cinder
capacity: 50
format: qcow2
@@ -320,8 +435,8 @@
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -346,8 +461,8 @@
- name: {{ HOSTNAME_MON02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -372,8 +487,164 @@
- name: {{ HOSTNAME_MON03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -399,7 +670,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -409,9 +680,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -439,6 +707,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -490,6 +764,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -505,8 +785,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -527,55 +807,3 @@
interfaces: *all_interfaces
network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
index f43d3f7..15f8d68 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
@@ -1,4 +1,7 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'False'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
@@ -13,7 +16,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -41,6 +44,7 @@
local_repositories: 'False'
maas_deploy_address: 192.168.10.90
maas_hostname: cfg01
+ maas_enabled: 'False'
mcp_version: stable
offline_deployment: 'False'
opencontrail_enabled: 'False'
@@ -51,6 +55,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -100,6 +107,11 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
openstack_version: queens
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -145,9 +157,9 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.60
stacklight_log_hostname: log
stacklight_log_node01_address: 172.16.10.61
@@ -173,7 +185,7 @@
stacklight_telemetry_node03_address: 172.16.10.88
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
- stacklight_long_term_storage_type: prometheus
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
@@ -181,3 +193,23 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: False
+ designate_backend: bind
+ designate_enabled: 'True'
+ nova_vnc_tls_enabled: 'False'
+ galera_ssl_enabled: 'False'
+ openstack_mysql_x509_enabled: 'False'
+ rabbitmq_ssl_enabled: 'False'
+ openstack_rabbitmq_x509_enabled: 'False'
+ tenant_telemetry_enabled: 'False'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'True'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
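Note: the manila_* keys above enable an LVM-backed share service; manila_lvm_devices '/dev/vdc' points at the extra 20 GB "manila" volume attached to the compute nodes later in this change (see underlay.yaml below). A quick way to confirm the generated model wired this up is a pillar query in the same deploy-step format the templates use. This is an illustrative check only, assuming the manila formula exposes a manila:share pillar as the role name suggests:

    - description: Verify manila pillar data (illustrative check, not part of this change)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@manila:share' pillar.item manila
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true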
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
index 0f806cf..081c51d 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-queens-dvr.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,16 +10,14 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-queens-dvr.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
-# - features_designate_pool_manager_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -27,15 +25,14 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-queens-dvr.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -43,15 +40,14 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-queens-dvr.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -59,11 +55,10 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-queens-dvr.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
-# - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -71,7 +66,7 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
+ mon01.mcp-queens-dvr.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -82,7 +77,7 @@
ens4:
role: single_ctl
- mon02.mcp11-ovs-dpdk.local:
+ mon02.mcp-queens-dvr.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -93,7 +88,7 @@
ens4:
role: single_ctl
- mon03.mcp11-ovs-dpdk.local:
+ mon03.mcp-queens-dvr.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -104,7 +99,7 @@
ens4:
role: single_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-queens-dvr.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -115,7 +110,7 @@
ens4:
role: single_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-queens-dvr.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -126,7 +121,7 @@
ens4:
role: single_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-queens-dvr.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -137,7 +132,7 @@
ens4:
role: single_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mtr01.mcp-queens-dvr.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -148,7 +143,7 @@
ens4:
role: single_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-queens-dvr.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -159,7 +154,7 @@
ens4:
role: single_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-queens-dvr.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -171,10 +166,11 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-queens-dvr.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -186,7 +182,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-queens-dvr.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -201,36 +197,35 @@
ens6:
role: bond1_ab_ovs_floating
-# dns01.mcp11-ovs-dpdk.local:
-# reclass_storage_name: openstack_dns_node01
-# roles:
-# - features_designate_pool_manager_dns
-# - linux_system_codename_xenial
-# classes:
-# - system.linux.system.repo.mcp.extra
-# - system.linux.system.repo.mcp.apt_mirantis.openstack
-# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-# - system.linux.system.repo.mcp.apt_mirantis.saltstack
-# interfaces:
-# ens3:
-# role: single_dhcp
-# ens4:
-# role: single_ctl
-# single_address: ${_param:openstack_dns_node01_address}
-#
-# dns02.mcp11-ovs-dpdk.local:
-# reclass_storage_name: openstack_dns_node02
-# roles:
-# - features_designate_pool_manager_dns
-# - linux_system_codename_xenial
-# classes:
-# - system.linux.system.repo.mcp.extra
-# - system.linux.system.repo.mcp.apt_mirantis.openstack
-# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-# - system.linux.system.repo.mcp.apt_mirantis.saltstack
-# interfaces:
-# ens3:
-# role: single_dhcp
-# ens4:
-# role: single_ctl
-# single_address: ${_param:openstack_dns_node02_address}
+ share01.mcp-queens-dvr.local:
+ reclass_storage_name: openstack_share_node01
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns01.mcp-queens-dvr.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-queens-dvr.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
index 92b21a5..293863a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
@@ -1,119 +1,19 @@
{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
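Note: core.yaml collapses roughly a hundred lines of inline steps into macro calls; the real macro bodies live in tcp_tests/templates/shared-core.yaml. As a sketch of the pattern, the memcached step removed above would be wrapped approximately like this (the actual shared-core.yaml body may differ in targeting or retries):

    {% macro MACRO_INSTALL_MEMCACHED() %}
    - description: Install memcached on all controllers
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@memcached:server' state.sls memcached
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false
    {% endmacro %}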
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
index 14cf12e..70cc4f5 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
@@ -15,143 +15,22 @@
# Install OpenStack control services
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-# isntall designate
-#- description: Install powerdns
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@powerdns:server' state.sls powerdns.server
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
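Note: the macro calls now carry flags (INSTALL_VOLUME, INSTALL_BIND, CELL_MAPPING) that toggle optional steps inside shared-openstack.yaml. Since Jinja macros take keyword defaults, templates that still call MACRO_INSTALL_CINDER() without arguments keep their old behaviour. A sketch of the assumed signature, with the real body living in shared-openstack.yaml:

    {% macro MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) %}
    {# controller-side cinder steps elided #}
    {% if INSTALL_VOLUME %}
    - description: Install cinder volume service on the LVM-backed nodes
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@cinder:volume' state.sls cinder
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 2, delay: 5}
      skip_fail: false
    {% endif %}
    {% endmacro %}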
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
index 80896e9..8c922d2 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-queens-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-queens-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -15,6 +15,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
index 1d323ba..414187b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
@@ -1,256 +1,24 @@
{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Launch containers
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+
{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
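Note: nearly every step removed above follows one guard idiom: run a state only if some minion actually matches the pillar, which is what lets the shared macros run unchanged on models where a component is disabled. The idiom, exactly as it appeared in the removed MongoDB step:

    - description: Install Mongo if target matches
      cmd: |
        if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
          salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
        fi
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false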
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
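Note: the added cloud-init commands make cfg01 bootstrap itself: salt-master and salt-minion are enabled and started at first boot, and 'salt-call -l info --timeout=120 test.ping' blocks until the local minion can reach the master, so later provisioning steps do not race the service start-up. The same call could be reused as a readiness probe in the deploy-step format; a sketch only, not part of this change:

    - description: Re-check master/minion wiring after first boot (illustrative only)
      cmd: salt-call -l info --timeout=120 test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 3, delay: 10}
      skip_fail: true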
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
index 7c1628c..f6d9b98 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
@@ -18,8 +18,8 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -30,9 +30,10 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -61,9 +62,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -89,9 +91,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -117,9 +120,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -145,11 +149,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
- dhcp: [+10, -10]
+ dhcp: [+130, +220]
groups:
@@ -259,9 +264,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -300,9 +302,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -329,9 +328,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -592,9 +588,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -622,6 +615,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -673,6 +672,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -711,54 +716,80 @@
interfaces: *all_interfaces
network_config: *all_network_config
-# - name: {{ HOSTNAME_DNS01 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: mcp_ubuntu_1604_image
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *all_interfaces
-# network_config: *all_network_config
-#
-# - name: {{ HOSTNAME_DNS02 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: mcp_ubuntu_1604_image
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *all_interfaces
-# network_config: *all_network_config
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
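Note: addresses in the devops underlay are offsets into each address pool rather than literal IPs. Assuming the private pool keeps its usual /24 base of 172.16.10.0 (matching the control subnet in the cookiecutter context above), the new entries line up with that context:

    default_{{ HOSTNAME_DNS01 }}: +113    # -> 172.16.10.113, openstack_dns_node01_address
    default_{{ HOSTNAME_DNS02 }}: +114    # -> 172.16.10.114, openstack_dns_node02_address
    default_{{ HOSTNAME_SHARE01 }}: +204  # -> 172.16.10.204, openstack_share_node01_address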
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
index fd49492..18a8beb 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
@@ -1,4 +1,7 @@
default_context:
+ barbican_backend: dogtag
+ barbican_enabled: 'False'
+ auditd_enabled: 'True'
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
@@ -13,7 +16,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -52,6 +55,9 @@
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+ openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+ openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
openstack_control_address: 172.16.10.100
openstack_control_hostname: ctl
openstack_control_node01_address: 172.16.10.101
@@ -101,6 +107,11 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
+ openstack_dns_hostname: dns
+ openstack_dns_node01_address: 172.16.10.113
+ openstack_dns_node01_hostname: dns01
+ openstack_dns_node02_address: 172.16.10.114
+ openstack_dns_node02_hostname: dns02
openstack_version: queens
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -146,7 +157,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
fluentd_enabled: 'True'
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.60
@@ -184,3 +195,21 @@
use_default_network_scheme: 'False'
rsync_fernet_rotation: 'True'
compute_padding_with_zeros: False
+ designate_backend: powerdns
+ designate_enabled: 'True'
+ nova_vnc_tls_enabled: 'False'
+ galera_ssl_enabled: 'False'
+ openstack_mysql_x509_enabled: 'False'
+ rabbitmq_ssl_enabled: 'False'
+ openstack_rabbitmq_x509_enabled: 'False'
+ tenant_telemetry_enabled: 'False'
+ gnocchi_aggregation_storage: file
+ manila_enabled: 'True'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/vdc'
+ openstack_share_address: 172.16.10.203
+ openstack_share_node01_address: 172.16.10.204
+ openstack_share_node01_deploy_address: 192.168.10.204
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
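Note: this context is near-identical to the cookied-mcp-queens-dvr one above; the substantive difference is the Designate backend:

    designate_backend: powerdns   # cookied-mcp-queens-ovs (this file)
    designate_backend: bind       # cookied-mcp-queens-dvr counterpart

which is mirrored below, where this template's openstack.yaml calls MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) instead of INSTALL_BIND=true.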
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
index 4c7091b..1593d43 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-queens-ovs.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,17 +10,14 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-queens-ovs.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9_dns
- # - features_designate_bind9
- # - features_designate_bind9_keystone
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -28,16 +25,14 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-queens-ovs.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9_dns
- # - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -45,15 +40,14 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-queens-ovs.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9
+ - features_lvm_backend_control
- linux_system_codename_xenial
interfaces:
ens3:
@@ -61,11 +55,10 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-queens-ovs.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- # - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -73,7 +66,7 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
+ mon01.mcp-queens-ovs.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -84,7 +77,7 @@
ens4:
role: single_ctl
- mon02.mcp11-ovs-dpdk.local:
+ mon02.mcp-queens-ovs.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -95,7 +88,7 @@
ens4:
role: single_ctl
- mon03.mcp11-ovs-dpdk.local:
+ mon03.mcp-queens-ovs.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -106,7 +99,7 @@
ens4:
role: single_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-queens-ovs.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -117,7 +110,7 @@
ens4:
role: single_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-queens-ovs.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -128,7 +121,7 @@
ens4:
role: single_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-queens-ovs.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -139,7 +132,7 @@
ens4:
role: single_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mtr01.mcp-queens-ovs.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -150,7 +143,7 @@
ens4:
role: single_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-queens-ovs.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -161,7 +154,7 @@
ens4:
role: single_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-queens-ovs.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -173,10 +166,11 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-queens-ovs.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
+ - features_lvm_backend_volume_vdb
- linux_system_codename_xenial
interfaces:
ens3:
@@ -188,7 +182,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-queens-ovs.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -202,3 +196,36 @@
role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
+
+ dns01.mcp-queens-ovs.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-queens-ovs.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share01.mcp-queens-ovs.local:
+ reclass_storage_name: openstack_share_node01
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
index afd0d5a..739c58c 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
@@ -1,119 +1,19 @@
{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
index 9f89060..75fd27f 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
@@ -8,6 +8,7 @@
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
@@ -20,70 +21,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) }}
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml
copy to tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml
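Note: openstack.yaml for this template now reads OVERRIDE_POLICY from the environment, and overrides-policy.yml is copied over from the ocata-dvr template, so per-run policy overrides become available here. The guarded upload removed from the dvr template earlier in this change shows the shape such a branch takes; reproduced as a sketch, this template would need its own equivalent:

    {%- if OVERRIDE_POLICY != '' %}
    - description: Upload policy override
      upload:
        local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
        local_filename: overrides-policy.yml
        remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
      node_name: {{ HOSTNAME_CFG01 }}
    {%- endif %}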
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
index 43191b5..1e102d5 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
@@ -5,7 +5,7 @@
{% from 'cookied-mcp-queens-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-mcp-queens-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -15,28 +15,15 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-- description: Start compute node addresses from .105, as in static models
- cmd: |
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-- description: Temporary workaround
- cmd: |
- apt install -y python-netaddr;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
index 40eb362..5ab3fd0 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
@@ -1,258 +1,24 @@
{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional if set in model)
-- description: Prometheus LTS (optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-#{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
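With this change the per-lab sl.yaml shrinks from roughly 260 lines to a dozen macro calls, and shared-sl.yaml owns the actual salt steps; each macro covers one of the deleted blocks, in the same order (docker swarm, MongoDB, telegraf/prometheus, elasticsearch/kibana, log collection, ceilometer collector, service configuration). Judging from the inline steps deleted above, MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS is expected to expand to roughly the following; this is a sketch mirroring the removed steps, while the authoritative body lives in shared-sl.yaml:

    - description: Install telegraf
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 2, delay: 10}
      skip_fail: false

    - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
      cmd: |
        if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
          salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
        fi
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false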
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
index 3d96226..6ea4098 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
@@ -30,8 +30,10 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
devops_settings:
env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
@@ -59,7 +61,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -85,7 +90,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+90, -10]
@@ -111,7 +119,10 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
dhcp: [+10, -10]
@@ -137,9 +148,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +113
+ default_{{ HOSTNAME_DNS02 }}: +114
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +204
ip_ranges:
- dhcp: [+10, -10]
+ dhcp: [+130, +220]
groups:
@@ -249,9 +263,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -290,9 +301,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -319,9 +327,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -582,9 +587,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -611,6 +613,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -662,6 +670,12 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: manila
+ capacity: 20
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -699,3 +713,81 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
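All three new nodes (dns01 and dns02 evidently backing the new Designate/PowerDNS steps, share01 backing Manila) are plain salt_minion VMs that reuse blocks defined once earlier in underlay.yaml through YAML anchors: each *all_interfaces, *all_network_config, *cloudinit_meta_data and *cloudinit_user_data_1604 alias expands to the block anchored with the matching &name. A self-contained illustration of the mechanism, with example values rather than the real underlay definitions:

    defaults:
      interfaces: &all_interfaces        # anchor: the block is defined once
        - label: ens3
          l2_network_device: admin
    node:
      name: dns01
      interfaces: *all_interfaces        # alias: expands to the anchored list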
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
index a42b5f0..ecd3224 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-bm-dpdk-pipeline' %}
{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
@@ -25,22 +25,6 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Remove rack01 key
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
- # Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index 0238598..9a830b9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -21,22 +21,6 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Remove rack01 key
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
- # Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
@@ -45,6 +29,14 @@
retry: {count: 1, delay: 5}
skip_fail: true
+- description: Temporary workaround for removing virtual gtw nodes
+ cmd: |
+ sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
- description: Temporary WR for correct bridge name according to environment templates
cmd: |
sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -55,5 +47,15 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: "WR for PROD-24311"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index 62da3ec..9970edd 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -32,12 +32,12 @@
set -e;
. /root/venv-reclass-tools/bin/activate;
# Remove rack01 key
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
# Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
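The only functional change here is the target path: the workaround now edits infra/config/init.yml instead of infra/config.yml. reclass resolves a class from either a flat file or a directory containing init.yml (much like Python packages), so the class name infra.config is unchanged; the reclass-tools calls simply have to touch the file that the newer model generator actually writes:

    # Both layouts resolve to the same reclass class "infra.config" of the cluster model:
    #   classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml        # flat form, older generated models
    #   classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml   # directory form, newer generated models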
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index c727bb1..b8bb9af 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-ovs-dpdk' %}
{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
@@ -25,22 +25,6 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Remove rack01 key
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
- # Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
index 0e8095a..7cf52a7 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -18,16 +18,4 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
- reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
- reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
index 776a516..130b3b3 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -18,16 +18,4 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
- reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
- reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
index 130d95a..65f3c2b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
@@ -18,16 +18,4 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: "Workaround for computes"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
- reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
- reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
index 2d4f8f7..e31a230 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
@@ -21,27 +21,31 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Start compute node addresses from .105, as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# set wider cpu mask for DPDK
- salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # set virtual disks for compute
- sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
index 91dedab..55d6a8b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
@@ -21,13 +21,10 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Start compute node addresses from .105, as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -35,25 +32,24 @@
# salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Temporary workaround for removing cinder-volume from CTL nodes
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
index 0c743dc..c9961c2 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
@@ -21,29 +21,34 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Start compute node addresses from .105, as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# Bind9 services are placed on the first two ctl nodes
# salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
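The recurring "remove cinder-volume from CTL nodes" workaround, now added to the pike-dpdk, pike-dvr-sl and pike-ovs-sl generators alike, is plain sed surgery on the cluster model: in s/\-\ system\.cinder\.volume\.single//g the dots are escaped so they match literally (the \- and "\ " escapes are redundant but harmless), and replacing the entry with nothing leaves a blank line that YAML ignores. Sketched on a fragment of openstack/control.yml, with an illustrative neighbouring entry:

    # before
    classes:
    - system.cinder.volume.single
    - system.cinder.volume.notification.messagingv2
    - system.linux.system.single          # illustrative neighbour, untouched
    # after both sed commands the two cinder entries are blank lines, so the
    # cinder-volume service is no longer scheduled on the CTL nodes.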
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
new file mode 100644
index 0000000..0c7d928
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
@@ -0,0 +1,55 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-queens-dvr-sl' %}
+# Name of the context file (without the extension, which is always .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-queens-dvr-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # Workaround for the missing reclass.system class for the dns role
+ # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
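The new generator template follows the same fixed pipeline as its siblings in cookied-model-generator: install packages and formulas on cfg01, render the cookiecutter Cluster model from cookiecutter-context-queens-dvr-sl.yaml, enable the Environment model, apply the combined-roles and cinder-volume workarounds, then rebuild the inventory with RERUN_SALTMASTER_STATE=true. Selecting it looks like any other lab config; a sketch, given that LAB_CONFIG_NAME is hard-coded inside the template and ENVIRONMENT_MODEL_INVENTORY_NAME falls back to it via os_env:

    export LAB_CONFIG_NAME=cookied-cicd-queens-dvr-sl
    # optional override; defaults to LAB_CONFIG_NAME inside the template:
    export ENVIRONMENT_MODEL_INVENTORY_NAME=cookied-cicd-queens-dvr-sl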
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
new file mode 100644
index 0000000..4cdda3b
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
@@ -0,0 +1,94 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-dvr-ceph' %}
+# Name of the context file (without the extension, which is always .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
+{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + " " + REPOSITORY_SUITE + " main") %}
+{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
+{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
+{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
+{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+
+ # Bind9 services are placed on the first two ctl nodes
+ salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Disable designate worker for Mitaka release"
+ cmd: |
+ set -e;
+ salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
+ salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
+ salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: "Temporary workaround: reduce Ceph pg_num/pgp_num from 128 to 4 (FIXME: fix properly or debug)"
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
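
A note on the repository defaults in the new template above: they are plain apt source strings assembled by Jinja concatenation, so a quick render check catches separator mistakes such as the missing space between SALT_VERSION and REPOSITORY_SUITE corrected above. A minimal sketch (the xenial codename and testing suite are illustrative values, not taken from this change):

    # Render the default SALT_REPOSITORY string the way APT will tokenize it:
    SALT_VERSION=2017.7
    REPOSITORY_SUITE=testing
    DISTRIB_CODENAME=xenial
    echo "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/${SALT_VERSION} ${REPOSITORY_SUITE} main"
    # deb [arch=amd64] http://apt.mirantis.com/xenial/salt/2017.7 testing main
    # (the URI ends at the version; suite and component must follow as separate tokens)
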
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
index 22297a6..a54ce3d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
@@ -34,32 +34,29 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# Workaround of missing reclass.system for dns role
salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -76,4 +73,13 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
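
The ten reclass-tools del-key calls in the workaround above differ only in the node key. An equivalent loop form, shown purely as a sketch (it assumes the same venv activation as the template, and <cluster-name> stands for the rendered SHARED.CLUSTER_NAME):

    . /root/venv-reclass-tools/bin/activate
    CFG=/srv/salt/reclass/classes/cluster/<cluster-name>/infra/config/init.yml
    for key in infra_kvm_node01 infra_kvm_node02 infra_kvm_node03 \
               openstack_database_node01 openstack_database_node02 openstack_database_node03 \
               openstack_message_queue_node01 openstack_message_queue_node02 openstack_message_queue_node03 \
               stacklight_log_node01; do
        reclass-tools del-key "parameters.reclass.storage.node.${key}" "${CFG}"
    done
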
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
index 48e019a..bd28102 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-ovs' %}
@@ -32,32 +32,29 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# Workaround of missing reclass.system for dns role
salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -74,4 +71,13 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
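
The cinder-volume workaround repeated across these templates blanks two class references in openstack/control.yml rather than deleting the lines. Illustrative effect (the surrounding class entry is hypothetical; only the removed names come from the sed patterns above):

    # before:                              after:
    #   classes:                             classes:
    #   - system.cinder.control.cluster      - system.cinder.control.cluster
    #   - system.cinder.volume.single        (whitespace-only line, ignored by YAML)
    sed -i 's/\-\ system\.cinder\.volume\.single//g' openstack/control.yml
    # Each matched item becomes a whitespace-only line, which YAML ignores, so the
    # class list stays valid; skip_fail: true keeps the step non-fatal (for
    # example, when the target file is absent).
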
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
index 570602d..948b051 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-mcp-newton-dvr' %}
@@ -24,32 +24,29 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# Workaround of missing reclass.system for dns role
salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -66,4 +63,13 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
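
For the DNS address settings above, salt-call reclass.cluster_meta_set writes (or updates) a key under the _param section of the target class file. Roughly, the first call amounts to the fragment below (a sketch; the 172.16.10 prefix is an assumed rendering of SHARED.IPV4_NET_CONTROL_PREFIX, and <cluster-name> is the rendered cluster name):

    salt-call reclass.cluster_meta_set \
        name='openstack_dns_node01_address' \
        value='172.16.10.111' \
        file_name=/srv/salt/reclass/classes/cluster/<cluster-name>/openstack/init.yml
    # expected fragment in openstack/init.yml afterwards:
    #   parameters:
    #     _param:
    #       openstack_dns_node01_address: 172.16.10.111
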
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
index 318cf87..ee24ff1 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-mcp-newton-ovs' %}
@@ -24,31 +24,28 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
# Bind9 services are placed on the first two ctl nodes
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -65,4 +62,13 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
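
Note the two DNS placement variants in these templates: the -dvr configs point openstack_dns_node01/02_address at dedicated .111/.112 addresses, while the -ovs configs reuse the first two controllers ("Bind9 services are placed on the first two ctl nodes"). In the -ovs case the values are reclass parameter references resolved at class-render time rather than literal IPs, e.g. (sketch, <cluster-name> as before):

    # Single quotes keep ${_param:...} away from shell expansion; reclass resolves it later.
    salt-call reclass.cluster_meta_set \
        name='openstack_dns_node01_address' \
        value='${_param:openstack_control_node01_address}' \
        file_name=/srv/salt/reclass/classes/cluster/<cluster-name>/openstack/init.yml
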
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
index 0178514..3211797 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
@@ -1,7 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dop-sl2' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index 55e8c2a..7adb184 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -1,10 +1,10 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-dvr' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
@@ -24,35 +24,41 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# Workaround of missing reclass.system for dns role
salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
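
As in the other templates, ENVIRONMENT_MODEL_INVENTORY_NAME defaults to LAB_CONFIG_NAME through os_env(), so the inventory name follows the rename above automatically unless it is explicitly overridden. Shell equivalent of that fallback (a sketch of the os_env semantics, not code from this change):

    # os_env(NAME, default): take $NAME from the environment, else use the default.
    LAB_CONFIG_NAME=cookied-mcp-ocata-dvr
    ENVIRONMENT_MODEL_INVENTORY_NAME="${ENVIRONMENT_MODEL_INVENTORY_NAME:-$LAB_CONFIG_NAME}"
    echo "$ENVIRONMENT_MODEL_INVENTORY_NAME"   # cookied-mcp-ocata-dvr unless overridden
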
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 911b935..0d0bd6b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -1,10 +1,10 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-ovs' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-ovs' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
@@ -24,34 +24,40 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
# Bind9 services are placed on the first two ctl nodes
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
index 97313de..b59248a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -21,31 +21,35 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# set wider cpu mask for DPDK
- salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # set virtual disks for compute
- sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
@@ -59,4 +63,13 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
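
The DPDK change above replaces the single 0xF core mask with a dedicated lcore mask (0x41) and a separate PMD mask (0xe), and pins 512 MB of hugepage memory per NUMA socket via socket_mem. Decoding the masks shows the OVS lcore and PMD threads land on disjoint cores (a small sketch):

    # Decode which CPU cores a DPDK bitmask selects:
    mask_to_cores() {
        local mask=$(( $1 )) core=0 out=""
        while (( mask > 0 )); do
            (( mask & 1 )) && out+="$core "
            mask=$(( mask >> 1 )); core=$(( core + 1 ))
        done
        echo "$out"
    }
    mask_to_cores 0x41   # -> 0 6     (ovs-vswitchd lcore threads)
    mask_to_cores 0xe    # -> 1 2 3   (PMD polling threads, disjoint from the lcores)
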
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
index 988f469..d4377b7 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
@@ -20,24 +20,21 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
new file mode 100644
index 0000000..4b86b85
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl-barbican' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
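
Note that the escaped `sed` substitution in the cinder-volume workaround blanks the matched class entry, leaving a whitespace-only line that YAML ignores, rather than removing the line. A stricter equivalent would be the line-delete form already used for the other classes in this file (a sketch, not part of the patch; `$CLUSTER_NAME` is a placeholder for `{{ SHARED.CLUSTER_NAME }}`):
```bash
# Line-delete form of the cinder-volume workaround (sketch)
sed -i '/system\.cinder\.volume\.single/d' \
    /srv/salt/reclass/classes/cluster/$CLUSTER_NAME/openstack/control.yml
sed -i '/system\.cinder\.volume\.notification\.messagingv2/d' \
    /srv/salt/reclass/classes/cluster/$CLUSTER_NAME/openstack/control.yml
```
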
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
new file mode 100644
index 0000000..c7de965
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index 4d672b3..2f19cd5 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -21,37 +21,24 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround of missing reclass.system for dns role
- # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add cinder volume on cmp nodes. PROD-20945
- reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index 5b2bacd..ed3a6c9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -21,36 +21,24 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add cinder volume on cmp nodes. PROD-20945
- reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
new file mode 100644
index 0000000..657e7c2
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
@@ -0,0 +1,54 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ceph' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Temporary workaround: reduce Ceph pg_num/pgp_num for the small lab (fix or debug properly later)"
+ cmd: |
+ sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+
+
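For context: `pg_num`/`pgp_num` set the placement-group count per Ceph pool, and 128 is far too many for a lab with only a couple of OSDs (Ceph raises a health warning when the PG-per-OSD ratio is exceeded). The applied value can be checked per pool after deployment; `volumes` below is a hypothetical pool name:
```bash
# Verify the reduced placement-group counts on a pool (pool name is an assumption)
ceph osd pool get volumes pg_num
ceph osd pool get volumes pgp_num
```
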
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
new file mode 100644
index 0000000..4e5dbc9
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ssl-barbican' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
new file mode 100644
index 0000000..bf6683d
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ssl' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
index 6abc5d0..1e50429 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
@@ -21,37 +21,20 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround of missing reclass.system for dns role
- # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add cinder volume on cmp nodes. PROD-20945
- reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
index 92a8bdb..7e2d2de 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
@@ -21,37 +21,20 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Start compute node addresses from .105 , as in static models
- sed -i 's/\.101/\.105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- # Add cinder volume on cmp nodes. PROD-20945
- reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
- reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -64,11 +47,4 @@
retry: {count: 1, delay: 5}
skip_fail: true
-- description: Temporary workaround
- cmd: |
- apt install -y python-netaddr;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index 7bd7a02..695e537 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -18,10 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- - cloud-init-per once sudo ifdown ens4
-
# Enable root access
- cloud-init-per once sudo sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- cloud-init-per once sudo service sshd restart
@@ -32,26 +28,21 @@
runcmd:
# Prepare network connection
- sudo ifdown ens3
+ - sudo ifdown ens4
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
- sudo ifup ens3
+ - sudo ifup ens4
#- sudo route add default gw {gateway} {interface_name}
- # Purge the unattended-upgrades package (Workaround for PROD-17904, PROD-18736)"
- - echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
- - echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
- - echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
- - apt-get -y purge unattended-upgrades
- # Stop currently running apt-daily service, source: https://unix.stackexchange.com/a/315517
- - systemctl stop apt-daily.service
- - systemctl kill --kill-who=all apt-daily.service
- - while ! (systemctl list-units --all apt-daily.service | fgrep -q dead); do sleep 1; done
-
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
# Enable grub menu using updated config below
- update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
# Create swap
#- fallocate -l 16G /swapfile
#- chmod 600 /swapfile
@@ -62,26 +53,10 @@
############## TCP Cloud cfg01 node ##################
- echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - apt-get clean
- - apt-get update
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
# Ensure that the salt-master service is ready to receive requests
- salt-key -y -D
- - service salt-master restart
- - service salt-minion restart
- - apt-get install -y salt-formula-*
- - for f in $(ls -1 /usr/share/salt-formulas/reclass/service); do ln -s /usr/share/salt-formulas/reclass/service/$f /srv/salt/reclass/classes/service/ || true; done
- - salt-call --timeout=180 test.ping
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - sudo ifup ens4
- ########################################################
-
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
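
Taken together, the cloud-init changes above replace the old apt/iptables preparation with a direct Salt bootstrap on cfg01. A condensed view of the resulting sequence, assembled from the added lines in the two hunks above (a sketch of the effective order, not a literal runcmd):
```bash
# Bring Salt up before anything else touches the node
mkdir -p /srv/salt/reclass/nodes
systemctl enable salt-master salt-minion
systemctl start salt-master salt-minion
salt-call -l info --timeout=120 test.ping   # wait until the local minion answers

# Base OS preparation, reduced to the essentials
echo "nameserver 172.18.208.44" > /etc/resolv.conf
salt-key -y -D                              # clear stale minion keys on the master
```
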
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index b231ced..e2b63dd 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -72,7 +72,7 @@
{%- if os_env('IRONIC_DNSMASQ_HOSTFILE', '') %}
- echo "dhcp-hostsfile=/var/lib/libvirt/dnsmasq/{{ IRONIC_ENV_NAME }}.hostsfile" >> /etc/dnsmasq.conf
- - service dnsmasq restart
+ - service dnsmasq restart && sleep 30
{%- endif %}
# Enable SNAT to allow internet access for deploying nodes using ironic node as a gateway
diff --git a/tcp_tests/templates/k8s-ha-calico/k8s.yaml b/tcp_tests/templates/k8s-ha-calico/k8s.yaml
index a3f228e..88075db 100644
--- a/tcp_tests/templates/k8s-ha-calico/k8s.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/k8s.yaml
@@ -45,7 +45,7 @@
- description: Register addons
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
+ -C 'I@kubernetes:master' state.sls kubernetes.master.setup
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
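
The targeting change here is worth a note: `-C` enables Salt compound matching, so dropping `and *01*` runs `kubernetes.master.setup` on every node carrying the `kubernetes:master` pillar instead of only the first master. For illustration:
```bash
# New form: all minions with the kubernetes:master pillar
salt -C 'I@kubernetes:master' test.ping

# Previous form: only masters whose minion id also matches the *01* glob
salt -C 'I@kubernetes:master and *01*' test.ping
```
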
diff --git a/tcp_tests/templates/k8s-ha-calico/salt.yaml b/tcp_tests/templates/k8s-ha-calico/salt.yaml
index d1ce14a..b066fc2 100644
--- a/tcp_tests/templates/k8s-ha-calico/salt.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/salt.yaml
@@ -2,7 +2,7 @@
{% from 'k8s-ha-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'k8s-ha-calico/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
index 7f8b8ec..dc9f8cd 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay.yaml b/tcp_tests/templates/k8s-ha-calico/underlay.yaml
index eef7bb8..ac11a62 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay.yaml
@@ -145,9 +145,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -186,9 +183,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -215,9 +209,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/k8s-ha-contrail/salt.yaml b/tcp_tests/templates/k8s-ha-contrail/salt.yaml
index 8e83628..086dc83 100644
--- a/tcp_tests/templates/k8s-ha-contrail/salt.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/salt.yaml
@@ -2,7 +2,7 @@
{% from 'k8s-ha-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'k8s-ha-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
index 48577ab..6076ffa 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -65,11 +63,12 @@
# Install common packages
- eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
index 2551d12..b87f888 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
@@ -200,9 +200,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -247,9 +244,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -276,9 +270,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
index 017b944..3f4c1a2 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
@@ -84,7 +84,7 @@
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- salt-call --local --state-output=mixed state.sls dnsmasq;
- salt-call --local --state-output=mixed state.sls nginx;
########################################################
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
index 19cc801..8faef2b 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
@@ -26,7 +26,7 @@
skip_fail: false
- description: MaaS auth
- cmd: maas logout mirantis && maas login mirantis http://localhost:5240/MAAS/api/2.0/ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN'
+ cmd: bash -x /var/lib/maas/.maas_login.sh
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
index b850283..8e4d506 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
@@ -41,16 +41,20 @@
- ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3
- ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4
- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- - cd /root/config-drive && /bin/bash -xe ./user-data
+ - cp /root/config-drive/user-data /root/config.sh && chmod 664 /root/config.sh
+ - sed -i '/^reboot$/d' /root/config.sh
+ - cd /root && /bin/bash -xe ./config.sh
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - systemctl reload sshd
########################################################
- # Node is ready, allow SSH access
+ # Node is ready, allow SSH access and reboot
- echo "Allow SSH access ..."
- "sed -i -e '/sshd:ALL/d' /etc/hosts.deny"
- touch /is_cloud_init_finish
+ - reboot
########################################################
write_files:
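The steps added above re-run the config-drive user-data as an ordinary script: the copy gets the payload out of the read-only drive, the `sed` strips its trailing `reboot` so it cannot fire mid-bootstrap, and a single controlled `reboot` is issued once SSH access is open. In isolation the pattern looks like this (paths as in the template; the comments are annotations, not template content):

    - cp /root/config-drive/user-data /root/config.sh && chmod 664 /root/config.sh
    - sed -i '/^reboot$/d' /root/config.sh   # drop the embedded reboot
    - cd /root && /bin/bash -xe ./config.sh  # -x traces each command, -e aborts on failure
    - reboot                                 # one controlled reboot at the very end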
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
index a3c7284..0228ee8 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -14,11 +14,13 @@
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM05', 'kvm05.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM06', 'kvm06.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM05 = os_env('HOSTNAME_KVM05', 'kvm05.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM06 = os_env('HOSTNAME_KVM06', 'kvm06.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP004 = os_env('HOSTNAME_CMP004', 'cmp004.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW03 = os_env('HOSTNAME_GTW03', 'gtw03.' + DOMAIN_NAME) %}
@@ -50,8 +52,16 @@
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX03 = os_env('HOSTNAME_PRX03', 'prx03.' + DOMAIN_NAME) %}
-
-
+{% set HOSTNAME_RGW = os_env('HOSTNAME_RGW', 'rgw.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD001 = os_env('HOSTNAME_OSD001', 'osd001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD002 = os_env('HOSTNAME_OSD002', 'osd002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD003 = os_env('HOSTNAME_OSD003', 'osd003.' + DOMAIN_NAME) %}
{% set ETH0_IP_ADDRESS_APT = os_env('ETH0_IP_ADDRESS_ATP', '10.10.0.14') %}
{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '10.10.0.15') %}
@@ -78,9 +88,21 @@
{% set ETH0_IP_ADDRESS_KVM06 = os_env('ETH0_IP_ADDRESS_KVM06', '10.10.0.246') %}
{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '10.10.0.101') %}
{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '10.10.0.102') %}
+{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '10.10.0.103') %}
+{% set ETH0_IP_ADDRESS_CMP004 = os_env('ETH0_IP_ADDRESS_CMP004', '10.10.0.104') %}
{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '10.10.0.224') %}
{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '10.10.0.225') %}
{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '10.10.0.226') %}
+{% set ETH0_IP_ADDRESS_RGW = os_env('ETH0_IP_ADDRESS_RGW', '10.10.0.75') %}
+{% set ETH0_IP_ADDRESS_RGW01 = os_env('ETH0_IP_ADDRESS_RGW01', '10.10.0.76') %}
+{% set ETH0_IP_ADDRESS_RGW02 = os_env('ETH0_IP_ADDRESS_RGW02', '10.10.0.77') %}
+{% set ETH0_IP_ADDRESS_RGW03 = os_env('ETH0_IP_ADDRESS_RGW03', '10.10.0.78') %}
+{% set ETH0_IP_ADDRESS_CMN01 = os_env('ETH0_IP_ADDRESS_CMN01', '10.10.0.66') %}
+{% set ETH0_IP_ADDRESS_CMN02 = os_env('ETH0_IP_ADDRESS_CMN02', '10.10.0.67') %}
+{% set ETH0_IP_ADDRESS_CMN03 = os_env('ETH0_IP_ADDRESS_CMN03', '10.10.0.68') %}
+{% set ETH0_IP_ADDRESS_OSD001 = os_env('ETH0_IP_ADDRESS_OSD001', '10.10.0.201') %}
+{% set ETH0_IP_ADDRESS_OSD002 = os_env('ETH0_IP_ADDRESS_OSD002', '10.10.0.202') %}
+{% set ETH0_IP_ADDRESS_OSD003 = os_env('ETH0_IP_ADDRESS_OSD003', '10.10.0.203') %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '10.11.0.15') %}
@@ -110,6 +132,16 @@
{% set ETH1_IP_ADDRESS_GTW01 = os_env('ETH1_IP_ADDRESS_GTW01', '10.11.0.224') %}
{% set ETH1_IP_ADDRESS_GTW02 = os_env('ETH1_IP_ADDRESS_GTW02', '10.11.0.225') %}
{% set ETH1_IP_ADDRESS_GTW03 = os_env('ETH1_IP_ADDRESS_GTW03', '10.11.0.226') %}
+{% set ETH1_IP_ADDRESS_RGW = os_env('ETH1_IP_ADDRESS_RGW', '10.11.0.75') %}
+{% set ETH1_IP_ADDRESS_RGW01 = os_env('ETH1_IP_ADDRESS_RGW01', '10.11.0.76') %}
+{% set ETH1_IP_ADDRESS_RGW02 = os_env('ETH1_IP_ADDRESS_RGW02', '10.11.0.77') %}
+{% set ETH1_IP_ADDRESS_RGW03 = os_env('ETH1_IP_ADDRESS_RGW03', '10.11.0.78') %}
+{% set ETH1_IP_ADDRESS_CMN01 = os_env('ETH1_IP_ADDRESS_CMN01', '10.11.0.66') %}
+{% set ETH1_IP_ADDRESS_CMN02 = os_env('ETH1_IP_ADDRESS_CMN02', '10.11.0.67') %}
+{% set ETH1_IP_ADDRESS_CMN03 = os_env('ETH1_IP_ADDRESS_CMN03', '10.11.0.68') %}
+{% set ETH1_IP_ADDRESS_OSD001 = os_env('ETH1_IP_ADDRESS_OSD001', '10.11.0.201') %}
+{% set ETH1_IP_ADDRESS_OSD002 = os_env('ETH1_IP_ADDRESS_OSD002', '10.11.0.202') %}
+{% set ETH1_IP_ADDRESS_OSD003 = os_env('ETH1_IP_ADDRESS_OSD003', '10.11.0.203') %}
{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.44.33') %}
@@ -145,12 +177,15 @@
default_{{ HOSTNAME_KVM06 }}: {{ ETH1_IP_ADDRESS_KVM06 }}
default_{{ HOSTNAME_CMP001 }}: {{ ETH1_IP_ADDRESS_CMP001 }}
default_{{ HOSTNAME_CMP002 }}: {{ ETH1_IP_ADDRESS_CMP002 }}
+ default_{{ HOSTNAME_CMP003 }}: {{ ETH1_IP_ADDRESS_CMP003 }}
+ default_{{ HOSTNAME_CMP004 }}: {{ ETH1_IP_ADDRESS_CMP004 }}
default_{{ HOSTNAME_GTW01 }}: {{ ETH1_IP_ADDRESS_GTW01 }}
default_{{ HOSTNAME_GTW02 }}: {{ ETH1_IP_ADDRESS_GTW02 }}
+ default_{{ HOSTNAME_GTW03 }}: {{ ETH1_IP_ADDRESS_GTW03 }}
default_{{ HOSTNAME_CTL }}: {{ ETH1_IP_ADDRESS_CTL }}
- default_{{ HOSTNAME_CTL01 }}: {{ ETH1_IP_ADDRESS_CTL02 }}
- default_{{ HOSTNAME_CTL02 }}: {{ ETH1_IP_ADDRESS_CTL03 }}
- default_{{ HOSTNAME_CTL03 }}: {{ ETH1_IP_ADDRESS_CTL04 }}
+ default_{{ HOSTNAME_CTL01 }}: {{ ETH1_IP_ADDRESS_CTL01 }}
+ default_{{ HOSTNAME_CTL02 }}: {{ ETH1_IP_ADDRESS_CTL02 }}
+ default_{{ HOSTNAME_CTL03 }}: {{ ETH1_IP_ADDRESS_CTL03 }}
default_{{ HOSTNAME_MSG }}: {{ ETH1_IP_ADDRESS_MSG }}
default_{{ HOSTNAME_MSG01 }}: {{ ETH1_IP_ADDRESS_MSG02 }}
default_{{ HOSTNAME_MSG02 }}: {{ ETH1_IP_ADDRESS_MSG03 }}
@@ -160,17 +195,27 @@
default_{{ HOSTNAME_MON02 }}: {{ ETH1_IP_ADDRESS_MON02 }}
default_{{ HOSTNAME_MON03 }}: {{ ETH1_IP_ADDRESS_MON03 }}
default_{{ HOSTNAME_DBS }}: {{ ETH1_IP_ADDRESS_DBS }}
- default_{{ HOSTNAME_DBS01 }}: {{ ETH1_IP_ADDRESS_DBS02 }}
- default_{{ HOSTNAME_DBS02 }}: {{ ETH1_IP_ADDRESS_DBS03 }}
- default_{{ HOSTNAME_DBS03 }}: {{ ETH1_IP_ADDRESS_DBS04 }}
+ default_{{ HOSTNAME_DBS01 }}: {{ ETH1_IP_ADDRESS_DBS01 }}
+ default_{{ HOSTNAME_DBS02 }}: {{ ETH1_IP_ADDRESS_DBS02 }}
+ default_{{ HOSTNAME_DBS03 }}: {{ ETH1_IP_ADDRESS_DBS03 }}
default_{{ HOSTNAME_LOG }}: {{ ETH1_IP_ADDRESS_LOG }}
- default_{{ HOSTNAME_LOG01 }}: {{ ETH1_IP_ADDRESS_LOG02 }}
- default_{{ HOSTNAME_LOG02 }}: {{ ETH1_IP_ADDRESS_LOG03 }}
- default_{{ HOSTNAME_LOG03 }}: {{ ETH1_IP_ADDRESS_LOG04 }}
+ default_{{ HOSTNAME_LOG01 }}: {{ ETH1_IP_ADDRESS_LOG01 }}
+ default_{{ HOSTNAME_LOG02 }}: {{ ETH1_IP_ADDRESS_LOG02 }}
+ default_{{ HOSTNAME_LOG03 }}: {{ ETH1_IP_ADDRESS_LOG03 }}
default_{{ HOSTNAME_MTR }}: {{ ETH1_IP_ADDRESS_MTR }}
- default_{{ HOSTNAME_MTR01 }}: {{ ETH1_IP_ADDRESS_MTR02 }}
- default_{{ HOSTNAME_MTR02 }}: {{ ETH1_IP_ADDRESS_MTR03 }}
- default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR04 }}
+ default_{{ HOSTNAME_MTR01 }}: {{ ETH1_IP_ADDRESS_MTR01 }}
+ default_{{ HOSTNAME_MTR02 }}: {{ ETH1_IP_ADDRESS_MTR02 }}
+ default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR03 }}
+ default_{{ HOSTNAME_RGW }}: {{ ETH1_IP_ADDRESS_RGW }}
+ default_{{ HOSTNAME_RGW01 }}: {{ ETH1_IP_ADDRESS_RGW01 }}
+ default_{{ HOSTNAME_RGW02 }}: {{ ETH1_IP_ADDRESS_RGW02 }}
+ default_{{ HOSTNAME_RGW03 }}: {{ ETH1_IP_ADDRESS_RGW03 }}
+ default_{{ HOSTNAME_CMN01 }}: {{ ETH1_IP_ADDRESS_CMN01 }}
+ default_{{ HOSTNAME_CMN02 }}: {{ ETH1_IP_ADDRESS_CMN02 }}
+ default_{{ HOSTNAME_CMN03 }}: {{ ETH1_IP_ADDRESS_CMN03 }}
+ default_{{ HOSTNAME_OSD001 }}: {{ ETH1_IP_ADDRESS_OSD001 }}
+ default_{{ HOSTNAME_OSD002 }}: {{ ETH1_IP_ADDRESS_OSD002 }}
+ default_{{ HOSTNAME_OSD003 }}: {{ ETH1_IP_ADDRESS_OSD003 }}
admin-pool01:
net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/16:16') }}
@@ -214,7 +259,16 @@
default_{{ HOSTNAME_MTR01 }}: {{ ETH0_IP_ADDRESS_MTR02 }}
default_{{ HOSTNAME_MTR02 }}: {{ ETH0_IP_ADDRESS_MTR03 }}
default_{{ HOSTNAME_MTR03 }}: {{ ETH0_IP_ADDRESS_MTR04 }}
-
+ default_{{ HOSTNAME_RGW }}: {{ ETH0_IP_ADDRESS_RGW }}
+ default_{{ HOSTNAME_RGW01 }}: {{ ETH0_IP_ADDRESS_RGW01 }}
+ default_{{ HOSTNAME_RGW02 }}: {{ ETH0_IP_ADDRESS_RGW02 }}
+ default_{{ HOSTNAME_RGW03 }}: {{ ETH0_IP_ADDRESS_RGW03 }}
+ default_{{ HOSTNAME_CMN01 }}: {{ ETH0_IP_ADDRESS_CMN01 }}
+ default_{{ HOSTNAME_CMN02 }}: {{ ETH0_IP_ADDRESS_CMN02 }}
+ default_{{ HOSTNAME_CMN03 }}: {{ ETH0_IP_ADDRESS_CMN03 }}
+ default_{{ HOSTNAME_OSD001 }}: {{ ETH0_IP_ADDRESS_OSD001 }}
+ default_{{ HOSTNAME_OSD002 }}: {{ ETH0_IP_ADDRESS_OSD002 }}
+ default_{{ HOSTNAME_OSD003 }}: {{ ETH0_IP_ADDRESS_OSD003 }}
public-pool01:
net: {{ os_env('PUBLIC_ADDRESS_POOL01', '172.16.44.0/22:22') }}
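Every hostname and address in this underlay resolves through `os_env()` with a default, so a node can be renamed or renumbered per environment without touching the template. A hypothetical extension for one more OSD node (OSD004 is illustrative and not defined above) would follow the same pattern:

    {% set HOSTNAME_OSD004 = os_env('HOSTNAME_OSD004', 'osd004.' + DOMAIN_NAME) %}
    {% set ETH0_IP_ADDRESS_OSD004 = os_env('ETH0_IP_ADDRESS_OSD004', '10.10.0.204') %}
    {% set ETH1_IP_ADDRESS_OSD004 = os_env('ETH1_IP_ADDRESS_OSD004', '10.11.0.204') %}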
diff --git a/tcp_tests/templates/runtest.yml b/tcp_tests/templates/runtest.yml
index cceaf8b..263dbb0 100644
--- a/tcp_tests/templates/runtest.yml
+++ b/tcp_tests/templates/runtest.yml
@@ -1,10 +1,11 @@
classes:
- service.runtest.tempest
+- service.runtest.tempest.services.manila.glance
parameters:
_param:
- runtest_tempest_cfg_dir: /root/test/
+ runtest_tempest_cfg_dir: /tmp/test/
runtest_tempest_cfg_name: tempest.conf
- runtest_tempest_public_net: net04_ext
+ runtest_tempest_public_net: public
tempest_test_target: gtw01*
neutron:
client:
@@ -19,25 +20,10 @@
convert_to_uuid:
network:
public_network_id: ${_param:runtest_tempest_public_net}
- network:
- floating_network_name: ${_param:runtest_tempest_public_net}
DEFAULT:
log_file: tempest.log
- heat_plugin:
- floating_network_name: ${_param:runtest_tempest_public_net}
compute:
- build_timeout: 600
- min_microversion: 2.1
- max_microversion: 2.53
min_compute_nodes: 2
- volume_device_name: 'vdc'
- dns_feature_enabled:
- api_admin: false
- api_v1: false
- api_v2: true
- api_v2_quotas: true
- api_v2_root_recordsets: true
- bug_1573141_fixed: true
share:
capability_snapshot_support: True
run_driver_assisted_migration_tests: False
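The renamed public network and the relocated config directory both flow through reclass parameter interpolation: a value is declared once under `_param` and referenced as `${_param:...}` wherever tempest needs it. A minimal sketch of the pattern (the `runtest` nesting is abbreviated here and is an assumption about the full model):

    parameters:
      _param:
        runtest_tempest_public_net: public   # declared once
      runtest:
        tempest:
          convert_to_uuid:
            network:
              public_network_id: ${_param:runtest_tempest_public_net}   # resolved by reference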
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml b/tcp_tests/templates/shared-ceph.yaml
similarity index 65%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
rename to tcp_tests/templates/shared-ceph.yaml
index 7b9df3a..ab13cb2 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
+++ b/tcp_tests/templates/shared-ceph.yaml
@@ -1,10 +1,7 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
+{# Collection of common macros shared across ceph and radosgw #}
-# Install ceph mons
+{%- macro MACRO_INSTALL_CEPH_MONS() %}
+
- description: Update grains
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@ceph:common' state.sls salt.minion.grains
@@ -39,6 +36,9 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEPH_MGR() %}
- description: Install ceph mgr if defined (needed only for Luminous)
cmd: |
@@ -48,6 +48,9 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() %}
- description: Install ceph osd
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -102,67 +105,34 @@
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro CONNECT_CEPH_TO_SERVICES() %}
+- description: Setup keyring for glance
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
-- description: Install ceph clinet
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:setup' state.sls ceph.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{%- for ssh in config.underlay.ssh %}
- {%- set salt_roles = [] %}
- {%- for role in ssh['roles'] %}
- {%- if role in config.salt_deploy.salt_roles %}
- {%- set _ = salt_roles.append(role) %}
- {%- endif %}
- {%- endfor %}
-
- {%- if salt_roles %}
-- description: Restart salt-minion as workaround of PROD-16970
+- description: Setup keyring for cinder and nova
cmd: |
- service salt-minion restart; # For case if salt-minion was already installed
- node_name: {{ ssh['node_name'] }}
- retry: {count: 1, delay: 1}
- skip_fail: false
- {%- endif %}
-{%- endfor %}
-
-- description: Connect ceph to glance
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Connect ceph to cinder and nova
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
-- description: Restart cinder volume
+- description: Setup keyring for gnocchi
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@gnocchi:server' match.pillar 'ceph:common' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@gnocchi:server' state.sls ceph.common,ceph.setup.keyring
+ fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
+ retry: {count: 1, delay: 5}
skip_fail: false
-
-- description: Restart nova-compute
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{%- endmacro %}
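With ceph.yaml generalized into macros, a per-lab template is expected to import shared-ceph.yaml and expand the macros in deployment order, the same way the existing templates import shared-salt.yaml. A hypothetical consumer (the import alias and ordering are a sketch; the macro names are verbatim from above):

    {% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
    {% import 'shared-ceph.yaml' as SHARED_CEPH with context %}

    {{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
    {{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
    {{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
    {{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}

`with context` matters here: the macro bodies reference `HOSTNAME_CFG01`, which is resolved from the importing template.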
diff --git a/tcp_tests/templates/shared-core.yaml b/tcp_tests/templates/shared-core.yaml
index 6099bc7..3b1a716 100644
--- a/tcp_tests/templates/shared-core.yaml
+++ b/tcp_tests/templates/shared-core.yaml
@@ -47,20 +47,45 @@
{%- endmacro %}
+{%- macro MACRO_INSTALL_KEEPALIVED() %}
+
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{%- endmacro %}
+
{%- macro MACRO_INSTALL_GLUSTERFS() %}
- description: Install glusterfs
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service && sleep 20
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Setup glusterfs on primary controller
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+ -C 'I@glusterfs:server:role:primary' state.sls glusterfs.server.setup -b 1
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 5}
skip_fail: false
- description: Check the gluster status
@@ -70,4 +95,134 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Refresh pillar before glusterfs client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_RABBITMQ() %}
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_GALERA() %}
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_HAPROXY() %}
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_NGINX() %}
+
+- description: Update certificate files on nginx nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 15}
+ skip_fail: false
+
+- description: Install nginx server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_MEMCACHED() %}
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_CHECK_VIP() %}
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
{%- endmacro %}
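The new shared-core macros let a lab template assemble the HA core piece by piece. A plausible ordering (a sketch only; macro names are verbatim from shared-core.yaml above) installs keepalived first so the VIP exists, and checks it last:

    {% import 'shared-core.yaml' as SHARED_CORE with context %}

    {{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
    {{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
    {{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
    {{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
    {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
    {{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
    {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
    {{ SHARED_CORE.MACRO_CHECK_VIP() }}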
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index 7d5dcac..b48a611 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -3,7 +3,7 @@
{%- macro MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) %}
{%- if USE_ORCHESTRATE %}
- description: |
- Execute salt orchestration state to configure all needed
+ Execute salt orchestration state to configure all needed
      prerequisites like creating the SSH public key, etc.
Workaround for PROD-22488, use for PROD-22535.
cmd: salt-run state.orchestrate keystone.orchestrate.deploy
@@ -40,20 +40,6 @@
retry: {count: 1, delay: 15}
skip_fail: false
-- description: Mount glusterfs.client volumes (resuires created 'keystone' system user)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls glusterfs.client -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Populate keystone services/tenants/admins
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
@@ -85,20 +71,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Mount glusterfs.client volumes (resuires created 'glusterfs' system user)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-#- description: Setup glance.client
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@glance:client' state.sls glance.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
- description: Check glance image-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C "I@keystone:server" cmd.run ". /root/keystonercv3;
@@ -109,6 +81,7 @@
{%- endmacro %}
{%- macro MACRO_INSTALL_NOVA() %}
+
- description: Install nova service on primary node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C "I@nova:controller and *01*" state.sls nova.controller
@@ -138,6 +111,15 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+
+- description: Create nova resources
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:client' match.pillar 'nova:client' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C "I@nova:client" state.sls nova.client
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{%- endmacro %}
{%- macro MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) %}
@@ -164,11 +146,11 @@
skip_fail: false
{%- if INSTALL_VOLUME %}
-- description: Install cinder volume
+- description: Install cinder volume, PROD-24485 set retry 2
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:volume' state.sls cinder
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
{%- endif %}
@@ -238,17 +220,27 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
{%- endmacro %}
-{%- macro MACRO_INSTALL_DESIGNATE() %}
-# Note: deploy backend for designate firstly
+{%- macro MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=false, INSTALL_BIND=false) %}
+ {%- if INSTALL_POWERDNS %}
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@powerdns:server' state.sls powerdns.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+ {%- endif %}
+
+ {%- if INSTALL_BIND %}
+- description: Install bind
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@bind:server' state.sls bind
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+ {%- endif %}
+
- description: Install designate on primary node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C "I@designate:server and *01*" state.sls designate.server
@@ -264,12 +256,28 @@
skip_fail: false
{%- endmacro %}
-{%- macro MACRO_INSTALL_CEILOMETER() %}
-# TO DO
+{%- macro MACRO_INSTALL_BARBICAN() %}
+
+- description: Install barbican server
+ cmd: |
+ salt -C 'I@barbican:server:role:primary' state.sls barbican.server;
+ salt -C 'I@barbican:server' state.sls barbican.server;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{%- endmacro %}
-{%- macro MACRO_INSTALL_BARBICAN() %}
-# TO DO
+{%- macro MACRO_INSTALL_DOGTAG() %}
+
+- description: Install dogtag server
+ cmd: |
+ salt -C 'I@dogtag:server:role:master' state.sls dogtag.server;
+ salt -C 'I@dogtag:server' state.sls dogtag.server;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{%- endmacro %}
{%- macro MACRO_INSTALL_IRONIC() %}
@@ -277,26 +285,114 @@
{%- endmacro %}
{%- macro MACRO_INSTALL_MANILA() %}
-# TO DO
+- description: Install manila-api on first node
+ cmd: |
+ salt -C 'I@manila:api and *01*' state.sls manila.api;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install manila-scheduler
+ cmd: |
+ salt -C 'I@manila:scheduler' state.sls manila.scheduler;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install manila-share
+ cmd: |
+ salt -C 'I@manila:share' state.sls manila.share;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check manila-services
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 15}
+ skip_fail: false
+
+- description: Create manila type
+ cmd: |
+ salt -C "I@manila:client" state.sls manila.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create CIFS and NFS shares and check their status
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
+ sleep 5;
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{%- endmacro %}
{%- macro MACRO_INSTALL_OCTAVIA_API() %}
-# TO DO
+
+- description: Execute glance client to upload octavia image
+ cmd: salt -C 'I@glance:client' state.sls glance.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Execute neutron client to create octavia resources
+ cmd: salt -C 'I@neutron:client' state.sls neutron.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install octavia api service on primary node
+ cmd: salt -C 'I@octavia:api:role:primary' state.sls octavia.api
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install octavia api service
+ cmd: salt -C 'I@octavia:api' state.sls octavia.api
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{%- endmacro %}
-{%- macro MACRO_INSTALL_DOGTAG() %}
-# TO DO
+{%- macro MACRO_INSTALL_OCTAVIA_MANAGER() %}
+- description: Update mine
+ cmd: salt -C 'I@neutron:client' mine.update && sleep 60
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install octavia manager
+ cmd: salt -C 'I@octavia:manager' state.sls octavia.manager
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Execute octavia ca
+ cmd: salt -C 'I@octavia:manager' state.sls salt.minion.ca
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Execute octavia cert
+ cmd: salt -C 'I@octavia:manager' state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Execute octavia client
+ cmd: salt -C 'I@octavia:client' state.sls octavia.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{%- endmacro %}
-{%- macro MACRO_INSTALL_GNOCCHI() %}
-# TO DO
-{%- endmacro %}
-
-{%- macro MACRO_INSTALL_AODH() %}
-# TO DO
-{%- endmacro %}
-
-{%- macro MACRO_INSTALL_COMPUTE() %}
+{%- macro MACRO_INSTALL_COMPUTE(CELL_MAPPING=false) %}
# Install compute node
- description: Apply formulas for compute node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
@@ -316,4 +412,90 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 10, delay: 30}
skip_fail: false
+
+ {%- if CELL_MAPPING %}
+- description: Re-run nova controller to apply cell mapping
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller and *01*" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+ {%- endif %}
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_REDIS() %}
+- description: Install redis service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:cluster:role:master' state.sls redis &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:server' state.sls redis
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_GNOCCHI() %}
+- description: Install gnocchi server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server and *01*' state.sls gnocchi.server &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server' state.sls gnocchi.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup gnocchi client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client and *01*' state.sls gnocchi.client &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client' state.sls gnocchi.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_PANKO() %}
+- description: Install panko server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEILOMETER() %}
+- description: Install ceilometer server on first node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server and *01*' state.sls ceilometer
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install ceilometer server on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server' state.sls ceilometer
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_AODH() %}
+- description: Install aodh server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server and *01*' state.sls aodh &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server' state.sls aodh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro OVERRIDE_POLICY() %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
{%- endmacro %}
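Several of the openstack macros now take flags, so one shared definition covers labs with and without the optional pieces. A hypothetical invocation enabling the optional paths (names and flags are verbatim from shared-openstack.yaml above; the selection itself is illustrative):

    {% import 'shared-openstack.yaml' as SHARED_OS with context %}

    {{ SHARED_OS.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
    {{ SHARED_OS.MACRO_INSTALL_NOVA() }}
    {{ SHARED_OS.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
    {{ SHARED_OS.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
    {{ SHARED_OS.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}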
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 88fef31..dcbf3af 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -5,11 +5,13 @@
{# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #}
{% set SALT_MODELS_REF_CHANGE = os_env('SALT_MODELS_REF_CHANGE', '') %}
{# Pin to a specified commit in salt-models/reclass-system #}
-{% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/reclass-system') %}
+{% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/reclass-system') %}
{% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
{% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
{% set SALT_MODELS_SYSTEM_TAG = os_env('SALT_MODELS_SYSTEM_TAG','') %}
-{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY_USER = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_USER','mcp-gerrit') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH','') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','ssh://' + COOKIECUTTER_TEMPLATES_REPOSITORY_USER +'@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates') %}
{% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
{% set COOKIECUTTER_TAG = os_env('COOKIECUTTER_TAG','') %}
{% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
@@ -51,7 +53,7 @@
{% set SALT_FORMULAS_REFS = os_env('SALT_FORMULAS_REFS', '') %}
{% set TEMPEST_PATTERN = os_env('TEMPEST_PATTERN', 'tempest') %}
{% set EXCLUDE_TEST_ARGS = os_env('EXCLUDE_TEST_ARGS', '') %}
-{% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.net/salt-formulas') %}
+{% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.com/salt-formulas') %}
# Needed for using different models in different templates
{% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
@@ -365,12 +367,23 @@
retry: {count: 1, delay: 1}
skip_fail: false
+- description: "Upload {{ COOKIECUTTER_TEMPLATES_REPOSITORY_USER }} key"
+ upload:
+ local_path: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | dirname }}/
+ local_filename: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+ remote_path: /tmp/
+ node_name: {{ HOSTNAME_CFG01 }}
+
- description: Create cluster model from cookiecutter templates
cmd: |
set -e;
set -x;
sudo apt-get install python-setuptools -y
pip install cookiecutter
+
+ chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+ eval $(ssh-agent)
+ ssh-add /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
{%- if COOKIECUTTER_REF_CHANGE != '' %}
@@ -519,6 +532,29 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endfor %}
+- description: "Replace template addresses to actual environment addresses"
+ cmd: |
+ set -ex;
+ # Replace firstly to an intermediate value to avoid intersection between
+ # already replaced and replacing networks.
+ # For example, if generated IPV4_NET_ADMIN_PREFIX=10.16.0 , then there is a risk of replacing twice:
+ # 192.168.10 -> 10.16.0 (generated network for admin)
+ # 10.16.0 -> <external network>
+ # So let's replace constant networks to the keywords, and then keywords to the desired networks.
+ export REPLACE_DIRS="/root/environment/"
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
+
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
cmd: |
set -e;
@@ -761,6 +797,12 @@
{# Prepare salt services and nodes settings #}
+- description: '*Workaround* of hardcoded host from day01 grains'
+ cmd: salt-key -d cfg01.mcp-day01.local -y
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
+
- description: Run 'linux' formula on cfg01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux;
node_name: {{ HOSTNAME_CFG01 }}
@@ -777,12 +819,6 @@
retry: {count: 3, delay: 5}
skip_fail: false
-- description: '*Workaround* of harcoded host from day01 grains'
- cmd: salt-key -d cfg01.mcp-day01.local -y
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
'*' cmd.run "echo ' StrictHostKeyChecking no' >> /root/.ssh/config"
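cookiecutter-templates is now cloned over SSH as the configured Gerrit user, so jobs must supply a private key via COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH; with the empty default, the upload step above has nothing to send and the clone would fail. For environments without a key, overriding the URL back to anonymous HTTPS is a plausible escape hatch (the HTTPS form of the .com mirror is an assumption; only the old .net URL appears in the history above):

    {# Hypothetical override, set via environment before rendering shared-salt.yaml #}
    {% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY',
        'https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates') %}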
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/shared-sl.yaml
similarity index 74%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
copy to tcp_tests/templates/shared-sl.yaml
index 0b559a8..34c42f0 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/shared-sl.yaml
@@ -1,5 +1,6 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{# Collection of SL macros #}
+{%- macro MACRO_INSTALL_DOCKER_SWARM() %}
# Install docker swarm
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
@@ -64,39 +65,43 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+{%- endmacro %}
+#{#%- macro MACRO_INSTALL_GLUSTERFS_CLIENT() %#}
+#- description: Install glusterfs client
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@glusterfs:client' state.sls glusterfs.client
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 2, delay: 15}
+# skip_fail: false
+#{#%- endmacro %#}
+
+{%- macro MACRO_INSTALL_MONGODB() %}
# Install slv2 infra
-#Launch containers
+# Install MongoDB for alerta
- description: Install Mongo if target matches
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+{%- endmacro %}
-- description: Configure Alerta if it is exists
+{%- macro MACRO_INSTALL_MONGODB_CLUSTER() %}
+# Create MongoDB cluster
+- description: Configure MongoDB cluster if target matches
cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 5, delay: 20}
skip_fail: false
+{%- endmacro %}
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
+{%- macro MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() %}
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
@@ -111,21 +116,35 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() %}
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -141,24 +160,15 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
+{%- endmacro %}
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
+{%- macro MACRO_INSTALL_LOG_COLLECTION() %}
# Install service for the log collection
- description: Configure fluentd
cmd: |
@@ -172,8 +182,10 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+{%- endmacro %}
-#Install heka ceilometer collector
+{%- macro MACRO_INSTALL_CEILOMETER_COLLECTOR() %}
+# Install heka ceilometer collector
- description: Install heka ceilometer if they exists
cmd: |
CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
@@ -185,32 +197,23 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+{%- endmacro %}
+{%- macro MACRO_CONFIGURE_SERVICES() %}
# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect Grains
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -219,39 +222,52 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+- description: "Check grafana service"
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:server' cmd.run
+ 'export SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ curl -sf http://${SL_VIP}:15013/;'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: "Check grafana service"
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:server' cmd.run
+ 'export SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ curl -sf http://${SL_VIP}:15013/;'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+{%- endmacro %}
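
The change above converts the flat stacklight step list into named Jinja2 macros (MACRO_INSTALL_LOG_COLLECTION, MACRO_INSTALL_CEILOMETER_COLLECTOR, MACRO_CONFIGURE_SERVICES, ...), so each lab template can compose only the pieces it needs. A sketch of the pattern, with MACRO_INSTALL_EXAMPLE and the shared-sl.yaml filename used purely for illustration:

{%- macro MACRO_INSTALL_EXAMPLE() %}
- description: Example step wrapped in a reusable macro
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@example:server' state.sls example
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
{%- endmacro %}

A consuming template then imports the shared file and expands the macro where the step should run:

{% import 'shared-sl.yaml' as SHARED_SL with context %}
{{ SHARED_SL.MACRO_INSTALL_EXAMPLE() }}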
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
index 69772ac..9710531 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- salt-call --local --state-output=mixed state.sls dnsmasq;
- salt-call --local --state-output=mixed state.sls nginx;
########################################################
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -48,15 +46,12 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- - sleep 160;
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
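
The cfg01 user-data change drops the temporary iptables gate on SSH and instead brings the Salt services up directly from cloud-init, finishing with a local ping to confirm the minion can reach the master. Reduced to a minimal cloud-config sketch (placement of the salt lines in runcmd is assumed from the surrounding file):

#cloud-config
bootcmd:
  # Enable root access early in boot
  - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
  - service sshd restart
runcmd:
  # Bring Salt up and verify master/minion connectivity
  - mkdir -p /srv/salt/reclass/nodes
  - systemctl enable salt-master
  - systemctl enable salt-minion
  - systemctl start salt-master
  - systemctl start salt-minion
  - salt-call -l info --timeout=120 test.ping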
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
deleted file mode 100644
index 9d07fb0..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
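
Every entry in the deleted file (and in the templates that replace it) is a step with the same five fields: the runner executes cmd on node_name, retries according to retry, and aborts the deployment on a persistent failure unless skip_fail is true. The schema, annotated with illustrative values:

- description: Human-readable step name shown in the run log
  cmd: |                                 # shell executed via the named node
    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}        # node the command is issued from (the salt master here)
  retry: {count: 3, delay: 10}           # up to 3 attempts, 10 seconds apart
  skip_fail: false                       # false: a persistent failure aborts the deployment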
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
deleted file mode 100644
index c73cfed..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
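
The deleted salt.yaml also carried the generic override hook: OVERRIDES is a newline-separated list of `key: value` pairs taken from the environment, and each pair is written into the cluster model via reclass.cluster_meta_set before the pillar is refreshed. Reduced to its core, the deleted block did the equivalent of:

{%- set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{%- for param in OVERRIDES.splitlines() %}
{%- set key, value = param.replace(' ','').split(':') %}
- description: Override cluster parameter {{ key }}
  cmd: salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{ OVERRIDES_FILENAME }}'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 1}
  skip_fail: false
{%- endfor %}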
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
index 16af75a..350be48 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
deleted file mode 100644
index 85afc89..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index e55c9f8..e2573e8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -168,4 +168,3 @@
skip_fail: false
{{ BACKUP.MACRO_BACKUP_CEPH() }}
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
index 7ac814c..3a24e5e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
@@ -1,118 +1,12 @@
{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
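
The hundred-odd inline support-service steps are replaced here by macro calls into shared-core.yaml, so keepalived, glusterfs, rabbitmq, galera, haproxy, nginx, memcached and the VIP check are maintained in one place. Presumably each macro wraps the same steps that were deleted from this file; sketched for memcached (the exact bodies live in shared-core.yaml):

{%- macro MACRO_INSTALL_MEMCACHED() %}
- description: Install memcached on all controllers
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@memcached:server' state.sls memcached
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}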
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index 18f18ee..13c3e13 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -13,7 +13,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -61,62 +61,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
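
The net04/net04_ext bootstrap steps removed above used the long-deprecated standalone neutron CLI. If a template ever needs to recreate that fixture network, a step in the same format could call the unified openstack client instead; a sketch under that assumption, not part of this change:

- description: Create the external fixture network with the openstack CLI
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
    '. /root/keystonercv3; openstack network create --external --provider-network-type flat --provider-physical-network physnet1 net04_ext'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false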
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
index cf418f5..89c3882 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
index e2ba165..9e0598d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -116,7 +116,7 @@
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
stacklight_enabled: 'True'
stacklight_log_address: 172.16.10.70
stacklight_log_hostname: mon
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
index 00577bf..bcbfec4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
@@ -5,7 +5,7 @@
{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
{% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
# For other salt model repository parameters, see shared-salt.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
index 965d297..5c35319 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
@@ -1,118 +1,12 @@
{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
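
One behavioural detail worth noting: this maas variant's deleted core.yaml applied galera to the slave nodes without the `-b 1` batch flag that the sibling templates use. With `-b 1`, salt runs the state on one minion at a time, letting each Galera node join the cluster serially instead of racing its peers:

- description: Install Galera on the remaining servers, one node at a time
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:slave' state.sls galera -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false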
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
index 9de0459..94a72e3 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
@@ -32,7 +32,7 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endif %}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
index be74a88..63fb199 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
@@ -46,6 +46,13 @@
- echo "nameserver 172.18.176.6" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
index 3ec4687..a2d4be8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
@@ -1,118 +1,12 @@
{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index 1d04d4d..2b791b2 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -28,7 +28,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -120,62 +120,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -235,5 +179,3 @@
retry: {count: 3, delay: 5}
skip_fail: false
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
index 937987b..bb4c5e4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -5,7 +5,7 @@
{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
index 57968ee..49b016a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index ceffe32..dcc854e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -48,7 +48,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -103,62 +103,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -218,5 +162,3 @@
retry: {count: 3, delay: 5}
skip_fail: false
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
index 3d66dcd..6729010 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
@@ -5,7 +5,7 @@
{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
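
Note that the Gerrit host change only moves the default: SALT_MODELS_REPOSITORY is read through os_env(), so any run can still point the templates at a mirror or fork from the environment, e.g. (the fork URL here is made up):

```bash
# override the default salt-models repository for a single run (example URL)
export SALT_MODELS_REPOSITORY=ssh://jenkins@gerrit.example.org:29418/salt-models/mcp-virtual-lab
```
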
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
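
The lines added above bootstrap salt-master and salt-minion on cfg01 straight from cloud-init user-data, ending with a local test.ping as a readiness probe. One cloud-init detail that makes these files easier to read: write_files is applied during the early init stage, before the command list runs, even though it appears later in the file. A minimal standalone sketch (the exact keys of this template are assumed):

```yaml
#cloud-config
# minimal sketch: files from write_files are on disk before these commands run,
# despite write_files being declared after them
runcmd:
  - mkdir -p /srv/salt/reclass/nodes
  - systemctl enable salt-master salt-minion
  - systemctl start salt-master salt-minion
  - salt-call -l info --timeout=120 test.ping
write_files:
  - path: /etc/salt/minion.d/local.conf
    content: |
      master: localhost
```
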
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
index cbde3f0..5c35319 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index db79c12..05fe6ee 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -35,7 +35,7 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endif %}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -98,62 +98,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -216,4 +160,3 @@
{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index 4beeb41..e027b64 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index c97a270..887d5eb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -167,5 +167,3 @@
retry: {count: 2, delay: 5}
skip_fail: false
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
index c0e9b0e..3aed7e6 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index 1387114..f2a71cd 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -11,7 +11,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -59,61 +59,3 @@
retry: {count: 10, delay: 30}
skip_fail: false
-
- # Upload cirros image
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
index 0ffdab0..684c535 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
index 368f2bf..cf4a90a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml
index 1b05fc8..1579920 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml
@@ -10,7 +10,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
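
As in the other openstack.yaml templates, the explicit USE_ORCHESTRATE=false argument is dropped, which presumably means the shared macro's default now yields the wanted behavior on its own. The mechanism is plain Jinja macro defaults (an illustrative sketch only, not the actual shared-openstack.yaml code):

```yaml
{# illustrative: a Jinja macro argument with a default value #}
{% macro MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) %}
{%- if USE_ORCHESTRATE %}
- description: Install keystone (orchestrated)   # placeholder step
  cmd: salt-run state.orchestrate keystone       # placeholder command
{%- else %}
- description: Install keystone                  # placeholder step
  cmd: salt -C 'I@keystone:server' state.sls keystone.server
{%- endif %}
{% endmacro %}
```

Calling MACRO_INSTALL_KEYSTONE() with no arguments simply takes the default branch.
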
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
index 4efcbd3..f6cc4c7 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
@@ -6,7 +6,7 @@
{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import DOMAIN_NAME with context %}
{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_VSWITCH with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,6 +14,8 @@
{% set VSWITCH_IP = SHARED.IPV4_NET_CONTROL_PREFIX+'.178' %}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VSWITCH) }}
+
{{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VSWITCH, VSWITCH_IP) }}
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
index abaa50d..af8778d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 54514ff..ea91bad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -13,7 +13,7 @@
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -124,62 +124,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -242,4 +186,3 @@
{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 00da72e..04a3e30 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index a15946f..a1b2c92 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-sl-os/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-sl-os/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/cluster/overrides.yml') %}
@@ -21,7 +21,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
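
The added maxsplit argument is a real fix, not a style change: OVERRIDES lines are key:value pairs, and any value that itself contains a colon (a URL, a timestamp) used to break the two-variable unpacking. A sketch of the difference, relying on Jinja's Python string semantics (the URL is made up):

```yaml
{# why split(':', 1): a value containing ':' must stay in one piece #}
{%- set param = 'linux_system_repo_url:http://mirror.example.org/ubuntu' %}
{%- set key, value = param.replace(' ','').split(':', 1) %}
{# key   -> 'linux_system_repo_url'            #}
{# value -> 'http://mirror.example.org/ubuntu' #}
{# with plain split(':') the result has three items and the #}
{# two-name unpacking raises an error at render time        #}
```
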
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
index a8afd05..1018c28 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
# Enable grub menu using updated config below
- update-grub
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
diff --git a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
index fc9f978..da47d0b 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
+++ b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
@@ -2,4 +2,14 @@
- virtual-mcp-mitaka-dvr-trusty
- virtual-mcp-mitaka-ovs-trusty
-Used by maintenance team.
\ No newline at end of file
+Used by maintenance team.
+
+The following env vars should be used:
+SALT_MODELS_COMMIT = 'fa85f84'
+SALT_MODELS_SYSTEM_TAG = '2018.8.0'
+REPOSITORY_SUITE = '2018.8.0'
+OVERRIDES = 'openstack_log_appender: true
+linux_system_repo_mk_openstack_version: testing
+'
+
+Also, VCP 2018.8.0 images should be used.
\ No newline at end of file
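
Spelled out as shell, the pinned configuration would presumably be exported before starting the deployment (values copied verbatim from the Readme above):

```bash
# pin the mitaka-trusty test configuration
export SALT_MODELS_COMMIT='fa85f84'
export SALT_MODELS_SYSTEM_TAG='2018.8.0'
export REPOSITORY_SUITE='2018.8.0'
export OVERRIDES='openstack_log_appender: true
linux_system_repo_mk_openstack_version: testing
'
```
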
diff --git a/tcp_tests/templates/virtual-mcp-trusty/core.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
index 938b11f..a433aee 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
@@ -1,6 +1,9 @@
{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
+# vkhlyunev: shared steps are constantly updated due to master development, so
+# we can't use them for this old release (e.g. the new steps for gluster use the
+# glusterfs:server:role:primary pillar for targeting, which does not exist in the
+# 2018.8.0 release model, and we can't update the model)
# Install support services
- description: Install keepalived on ctl01
@@ -17,7 +20,33 @@
retry: {count: 1, delay: 10}
skip_fail: true
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service && sleep 20
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status && gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
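
Compared to the shared macro, the pinned gluster steps above add `&& sleep 20` after the service state (presumably giving glusterd time to settle) and lean on two salt CLI details: the compound target `-C 'I@glusterfs:server and *01*'` narrows a run to the primary node, and `-b 1` (batch size one) applies a state to one minion at a time instead of all at once:

```bash
# -b / --batch-size limits how many minions run the state concurrently;
# the setup steps above use -b 1 to serialize cluster bring-up
salt -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
```
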
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
index 3833315..fff0966 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
@@ -6,6 +6,10 @@
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+# vkhlyunev: shared steps are constantly updated due to master development, so
+# we can't use them for this old release. For openstack.yaml we can use some
+# shared steps for now, but TODO: bind the deployment workflow to the 2018.8.0 state
+
# Install OpenStack control services
- description: Sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -G 'oscodename:trusty' cmd.run "service ntp stop && ntpdate pool.ntp.org && service ntp start"
@@ -23,11 +27,94 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+- description: Install keystone service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server and *01*' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Install keystone service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+ cmd.run "service apache2 restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+ cmd.run "service apache2 status"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' system user to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls glusterfs.client -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+ openstack service list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+- description: Install nova service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller and *01*" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install nova service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+ openstack compute service list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+ openstack server list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
index 67695d9..d8f7fb7 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp-trusty/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp-trusty/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
@@ -10,6 +10,15 @@
{% import 'shared-salt.yaml' as SHARED with context %}
+# vkhlyunev: sometimes we have to verify fixes to mitaka openstack based on the
+# ubuntu trusty OS deployment. The last known deployable configuration is based on
+# the mcp-virtual-lab/salt-formulas/reclass-system parameters/commits/tags listed below:
+# SALT_MODELS_COMMIT = 'fa85f84'
+# SALT_MODELS_SYSTEM_TAG = '2018.8.0'
+# REPOSITORY_SUITE = '2018.8.0'
+# OVERRIDES = 'openstack_log_appender: true
+# linux_system_repo_mk_openstack_version: testing'
+
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
@@ -22,7 +31,7 @@
{%- if OVERRIDES != '' %}
{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
- description: Override cluster parameters
cmd: |
salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
@@ -30,13 +39,27 @@
retry: {count: 1, delay: 1}
skip_fail: false
{%- endfor %}
+{%- endif %}
+
+# vkhlyunev: the fa85f84 model commit contains sphinx on the cfg01 node,
+# which is not required for mitaka-trusty testing. Unfortunately we cannot fix
+# it in the model itself because the models are constantly updated to follow
+# the development of the main release.
+- description: Apply sphinx workaround - delete system.sphinx class
+ cmd: sed -i -e '/system.sphinx/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Apply sphinx workaround - delete nginx section
+ cmd: sed -i -e '/ nginx:/,+8d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
- description: Refresh pillar
cmd: salt '*' saltutil.refresh_pillar
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
-{%- endif %}
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
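
To make the two sed workarounds above concrete: the first deletes the system.sphinx class line, while the second uses GNU sed's `addr,+N` form to delete the line matching ` nginx:` plus the eight lines after it. Against a hypothetical infra/config.yml fragment they act like this:

```yaml
# hypothetical infra/config.yml fragment; the real file will differ
classes:
- system.sphinx          # removed by: sed -i -e '/system.sphinx/d'
parameters:
  nginx:                 # this line and the next 8 are removed by:
    server:              # sed -i -e '/ nginx:/,+8d'
      enabled: true
```
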
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
index a62f1cd..b557d3a 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
index 9f5e5c3..d16a126 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
index 198c0f8..36a0228 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% set ENABLE_COMPUTES_SELF_REGISTER = os_env('ENABLE_COMPUTES_SELF_REGISTER', '') %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
index 557f5dc..d0844bc 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
index 1deeb7d..27999e1 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
index eac93fc..46c02a0 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
index 8ec1740..cfa0272 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
index 8ba7026..0a43183 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
@@ -1,5 +1,6 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
- description: remove apparmor
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -8,118 +9,11 @@
retry: {count: 1, delay: 10}
skip_fail: true
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
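
The hunk above is the heart of this change for virtual-offline-pike-ovs-dpdk: roughly a hundred lines of hand-written keepalived, glusterfs, rabbitmq, galera, haproxy and memcached steps collapse into shared-core.yaml macro calls. Each macro is assumed to expand to the same kind of step that was deleted; judging from the removed block, MACRO_INSTALL_MEMCACHED() would render approximately:

    - description: Install memcached on all controllers
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@memcached:server' state.sls memcached
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false

Note that MACRO_INSTALL_NGINX() has no counterpart among the deleted steps, so the refactor also adds an nginx installation to this template.
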
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
index fc423cb..68a1220 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
@@ -5,16 +5,16 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
# Install OpenStack control services
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -226,4 +226,4 @@
{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
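
Two behavioral changes ride along in openstack.yaml: the default REPOSITORY_SUITE switches from 'testing' to 'proposed', and the docker mirror path moves from the ubuntu-xenial/docker layout to a per-suite layout. With the new default, DOCKER_LOCAL_REPO renders as in this sketch:

    {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
    {# DOCKER_LOCAL_REPO then renders to:
       deb [arch=amd64] http://mirror.mcp.mirantis.local.test/proposed/docker/xenial xenial stable #}

The final hunk only adds the missing trailing newline; the macro call itself is unchanged.
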
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
new file mode 100644
index 0000000..fc45c30
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
@@ -0,0 +1,136 @@
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+# Install OpenStack control services
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
+
+- description: Run 'openssh' formula on cfg01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls openssh &&
+ salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh reload"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Configure openssh on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
+ salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh reload"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Sync time on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable local docker repo
+ cmd: |
+ set -e;
+ echo "{{ DOCKER_LOCAL_REPO }}" > /etc/apt/sources.list.d/mcp_docker.list;
+ apt-get clean; apt-get update;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker-ce on gtw
+ cmd: salt-call cmd.run 'apt-get install docker-ce -y --allow-unauthenticated'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy on gtw
+ cmd: |
+ set -e;
+ iptables --policy FORWARD ACCEPT;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Fetch the keystonercv3 rc file from ctl01 to cfg01
+ cmd: scp -o StrictHostKeyChecking=no ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy the rc file to gtw01
+ cmd: scp -o StrictHostKeyChecking=no /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
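
The new post_openstack.yaml reuses the step schema common to all of these template files: each entry is one shell command executed through salt on node_name, with retry and skip_fail controlling failure handling:

    - description: <human-readable summary>
      cmd: <command run on the target>
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 30}   # re-run up to `count` times, waiting `delay` seconds between tries
      skip_fail: false               # true: continue the scenario even if the step keeps failing

Worth noting: this file keeps the old 'testing' default for REPOSITORY_SUITE while the sibling openstack.yaml moves to 'proposed'; whether that divergence is intentional is not clear from the diff.
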
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
index 420b805..bb1316a 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
@@ -37,9 +37,9 @@
export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
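
run_test.sh drops the extra 'mcp' label from the mirror host and reorders the suite components. With REPOSITORY_SUITE set to 'proposed', as the templates in this change default to (the value is an assumption, not fixed by this script), the first export would resolve to:

    deb http://mirror.mirantis.local.test/proposed/ubuntu xenial main universe restricted
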
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index 921cd7b..828a14f 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -3,6 +3,9 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID02 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID03 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
@@ -15,6 +18,7 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_VS with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
# For other salt model repository parameters, see shared-salt.yaml
@@ -37,12 +41,16 @@
retry: {count: 1, delay: 10}
skip_fail: false
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VS) }}
{{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VS, VSWITCH_IP) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID02) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID03) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
@@ -65,7 +73,7 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila" "jenkins" "glusterfs"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -88,15 +96,60 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install watchdog
- cmd: salt -C "I@watchdog:server" state.sls watchdog;
+#- description: Install watchdog
+# cmd: salt -C "I@watchdog:server" state.sls watchdog;
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+- description: WR for correct access to git repos from Jenkins on the cfg01 node
+ cmd: |
+ export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mk/mk-pipelines /home/repo/mk/mk-pipelines/;
+ export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mcp-ci/pipeline-library /home/repo/mcp-ci/pipeline-library/;
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+- description: '*Workaround* to remove the apt key until migration to CC'
+ cmd: salt-key -d apt01.virtual-offline-pike-ovs-dpdk -y
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
+
+- description: '*Workaround* stop the minion on the apt-like proxy node'
+ cmd: systemctl stop salt-minion.service
+ node_name: {{ HOSTNAME_APT01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
+
- description: Workaround to bring OVS interfaces UP on cmp nodes without a reboot
cmd: |
salt 'cmp*' cmd.run "ifup br-mesh";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
+
+- description: Temporary WR to add the cfg01 host key to Jenkins known_hosts
+ cmd: |
+ ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Enable Jenkins
+ cmd: |
+ systemctl enable jenkins || true;
+ systemctl restart jenkins || true;
+ sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Run the jenkins.client state
+ cmd: |
+ salt-call state.sls jenkins.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 60}
+ skip_fail: false
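
The salt.yaml changes wire DriveTrain into this offline template: 'jenkins' and 'glusterfs' join the formula list, the watchdog step is commented out, and a series of workarounds prepares an offline Jenkins on cfg01: local git mirrors of mk-pipelines and pipeline-library under /home/repo, a pre-seeded known_hosts entry, and finally the jenkins.client state with three 60-second retries. A hypothetical smoke check one could append after the jenkins.client step (the port is an assumption; the diff does not specify where Jenkins listens):

    - description: Check that Jenkins answers on cfg01 (port 8081 assumed)
      cmd: curl -sf -o /dev/null http://localhost:8081/api/json
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 3, delay: 10}
      skip_fail: true
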
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
index 7297a41..fe2c8f3 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- salt-call --local --state-output=mixed state.sls dnsmasq;
- salt-call --local --state-output=mixed state.sls nginx;
########################################################
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -48,15 +46,12 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- - sleep 160;
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
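
The cfg01 cloud-init rewrite replaces the old "block SSH, sleep 160, unblock SSH" readiness dance with an explicit salt bring-up: the master and minion are enabled and started in bootcmd, and salt-call test.ping acts as the readiness gate. A condensed sketch of the resulting flow (systemctl enable --now is an equivalent shorthand, not the literal lines from the file):

    bootcmd:
      - mkdir -p /srv/salt/reclass/nodes
      - systemctl enable --now salt-master salt-minion
      - salt-call -l info --timeout=120 test.ping   # fails the boot step if the local minion is not up
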
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
index ab4dbe5..15da576 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
@@ -19,6 +19,9 @@
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
@@ -45,6 +48,10 @@
l2_network_device: +1
default_{{ HOSTNAME_APT01 }}: +122
default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CID }}: +80
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
@@ -59,7 +66,7 @@
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_VS }}: +178
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+60, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -69,6 +76,10 @@
l2_network_device: +1
default_{{ HOSTNAME_APT01 }}: +122
default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CID }}: +80
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
@@ -83,7 +94,7 @@
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_VS }}: +178
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+60, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -666,3 +677,81 @@
interfaces: *interfaces
network_config: *network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
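
The three cid nodes are near-identical blocks that lean on YAML anchors defined earlier in underlay.yaml, outside this hunk: *interfaces, *network_config and the *cloudinit_* aliases all expand to structures shared with the existing nodes. Sizing is environment-tunable; a sketch of the pattern:

    vcpu: {{ os_env('CID_NODE_CPU', 3) }}          # override with CID_NODE_CPU=4, for example
    memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
    interfaces: *interfaces                        # alias to the interface list defined once above
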
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
index 705e9be..df9fd73 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
@@ -1,6 +1,21 @@
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+{% set OPENSTACK_PIKE_REPOSITORY = os_env('OPENSTACK_PIKE_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE + "/openstack-pike/xenial/ xenial main") %}
+{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
+{% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
+
{%- macro MACRO_CONFIGURE_VSWITCH(NODE_NAME, IP) %}
{#################################################}
+- description: 'Enable the OpenStack repository for required packages'
+ cmd: |
+ apt-key adv --keyserver "{{ UBUNTU_KEY_SERVER }}" --recv-keys "{{ UBUNTU_KEY_ID }}"
+ echo "{{ OPENSTACK_PIKE_REPOSITORY }}" > /etc/apt/sources.list.d/openstack.list
+ eatmydata apt-get clean;
+ apt-get update;
+ sync;
+ node_name: {{ NODE_NAME }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
- description: 'Install openvswitch-vtep package and configure it'
cmd: |
@@ -8,9 +23,9 @@
ifconfig ens4 up
apt-get update
- apt-get -y install openvswitch-switch
+ apt-get -y install openvswitch-switch --allow-unauthenticated
service openvswitch-switch stop
- apt-get -y install openvswitch-vtep bridge-utils
+ apt-get -y install openvswitch-vtep bridge-utils --allow-unauthenticated
ovsdb-tool create /etc/openvswitch/vtep.db /usr/share/openvswitch/vtep.ovsschema
ovsdb-tool create /etc/openvswitch/vswitch.db /usr/share/openvswitch/vswitch.ovsschema
@@ -67,7 +82,13 @@
skip_fail: false
- description: 'Refresh pillar data after L2GW enablement'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 15
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: 'Sync all'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -75,7 +96,7 @@
- description: 'Check L2GW is enabled'
cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep True
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
{%- endmacro %}
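
MACRO_CONFIGURE_VSWITCH now provisions its own package source before installing: it imports the Ubuntu signing key, writes the Pike repository into sources.list.d, and installs the openvswitch packages with --allow-unauthenticated; a pillar refresh with a settle delay, a new saltutil.sync_all step, and a more patient L2GW check (3 retries instead of 1) round it out. The macro is invoked from this template's salt.yaml as shown in the hunk above for that file:

    {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VS) }}
    {{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VS, VSWITCH_IP) }}
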
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
index b3a1404..4d69c89 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
@@ -1,5 +1,7 @@
{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
- description: remove apparmor
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
'*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -7,118 +9,11 @@
retry: {count: 1, delay: 10}
skip_fail: true
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 40764b1..d362573 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -6,15 +6,16 @@
{% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs') %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
+
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
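
The keystone macro call is simplified across all three offline templates. Dropping the explicit argument means the macro now runs with whatever default shared-openstack.yaml defines; presumably USE_ORCHESTRATE defaults to true there, so this would enable the orchestrated keystone install, though the macro body is not part of this diff:

    {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}   {# before #}
    {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}                        {# after: macro default applies #}
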
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
index d1e0380..a33e90f 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
@@ -37,9 +37,9 @@
export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
index 7297a41..fe2c8f3 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- salt-call --local --state-output=mixed state.sls dnsmasq;
- salt-call --local --state-output=mixed state.sls nginx;
########################################################
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -48,15 +46,12 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- - sleep 160;
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-offline-ssl/core.yaml b/tcp_tests/templates/virtual-offline-ssl/core.yaml
index 0c75bb4..c08b0cd 100644
--- a/tcp_tests/templates/virtual-offline-ssl/core.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/core.yaml
@@ -1,5 +1,7 @@
{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
- description: remove apparmor
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
'*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -7,118 +9,11 @@
retry: {count: 1, delay: 10}
skip_fail: true
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
index a2c296c..e84ed31 100644
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
@@ -15,8 +15,7 @@
{% import 'shared-salt.yaml' as SHARED with context %}
{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
-
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
# Install OpenStack control services
@@ -34,7 +33,7 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
diff --git a/tcp_tests/templates/virtual-offline-ssl/run_test.sh b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
index 1695eae..747f959 100755
--- a/tcp_tests/templates/virtual-offline-ssl/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
@@ -35,10 +35,11 @@
export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
+#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
cd tcp_tests
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
index 7297a41..fe2c8f3 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- salt-call --local --state-output=mixed state.sls dnsmasq;
- salt-call --local --state-output=mixed state.sls nginx;
########################################################
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -48,15 +46,12 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- - sleep 160;
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index c72c3bb..81d958d 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -13,7 +13,7 @@
control_vlan: '10'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 192.168.10.1
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
@@ -144,7 +144,7 @@
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
shared_reclass_branch: master
- shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
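
In the cookiecutter context the Gerrit relocation shows up as plain YAML values rather than os_env() defaults, so unlike the template changes above there is no environment-variable escape hatch here; the two rehosted URLs are:

    cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
    shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
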
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
index 050c4c4..5da2666 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
@@ -17,18 +17,18 @@
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
# Start compute node addresses from .105 , as in static models
- sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
. /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
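
The generated model now stores the infra config class as a directory (infra/config/init.yml instead of infra/config.yml), so every sed and reclass-tools invocation in the combined-roles workaround retargets the new path. The pattern, as used in the hunk above:

    sed -i 's/start: 101/start: 105/g' \
      /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml
    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 \
      /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml
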
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
index 202a7e2..11a7665 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
@@ -1,117 +1,12 @@
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
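The rewritten core.yaml above drops roughly a hundred lines of per-service steps in favor of macros imported from shared-core.yaml. A minimal sketch of the underlying Jinja2 mechanism, with stand-in template bodies rather than the repo's actual macros:
```
# Sketch only: shows how a shared macro file replaces inlined step lists.
from jinja2 import DictLoader, Environment

templates = {
    # Stand-in for shared-core.yaml: one macro that expands to a step.
    'shared-core.yaml': (
        "{% macro MACRO_INSTALL_KEEPALIVED() %}\n"
        "- description: Install keepalived\n"
        "  cmd: salt -C 'I@keepalived:cluster' state.sls keepalived\n"
        "{% endmacro %}"),
    # Stand-in for core.yaml: imports the macro and calls it.
    'core.yaml': (
        "{% import 'shared-core.yaml' as SHARED_CORE with context %}\n"
        "{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}"),
}

env = Environment(loader=DictLoader(templates))
print(env.get_template('core.yaml').render())
```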
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
index f6e554e..3aa53f9 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
@@ -12,7 +12,7 @@
# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -166,4 +166,4 @@
{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index de035c2..bcbcd75 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -2,7 +2,7 @@
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -11,7 +11,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate" "jenkins"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - mkdir -p /srv/salt/reclass/nodes
+ - systemctl enable salt-master
+ - systemctl enable salt-minion
+ - systemctl start salt-master
+ - systemctl start salt-minion
+ - salt-call -l info --timeout=120 test.ping
+
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/tests/system/test_3rdparty_suites.py b/tcp_tests/tests/system/test_3rdparty_suites.py
index d545532..78583af 100644
--- a/tcp_tests/tests/system/test_3rdparty_suites.py
+++ b/tcp_tests/tests/system/test_3rdparty_suites.py
@@ -33,7 +33,7 @@
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_tempest
def test_run_tempest(self, tempest_actions, show_step, _):
- """Runner for Juniper contrail-tests
+ """Runner for Openstack tempest tests
Scenario:
1. Run tempest
@@ -83,11 +83,16 @@
k8s_actions.run_conformance()
@pytest.mark.grab_versions
+ @pytest.mark.extract(container_system='docker',
+ extract_from='mirantis/virtlet',
+ files_to_extract=['conformance_virtlet_result.xml'])
@pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
- 'report.xml'])
+ 'conformance_virtlet_result.xml'])
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.k8s_conformance_virtlet
def test_run_k8s_conformance_virtlet(self, show_step, config, k8s_actions,
k8s_logs, _):
"""Test run of k8s virtlet conformance tests"""
- k8s_actions.run_virtlet_conformance()
+ config.k8s.run_extended_virtlet_conformance = True
+ k8s_actions.run_virtlet_conformance(
+ report_name="conformance_virtlet_result.xml")
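The new `@pytest.mark.extract` and `@pytest.mark.grab_k8s_results` markers carry the artifact names that a fixture or hook collects after the run. A hypothetical sketch of how such marker kwargs can be read; the repo's real `k8s_logs` machinery may differ:
```
import pytest

@pytest.fixture
def artifact_collector(request):  # hypothetical name, not tcp-qa API
    yield
    marker = request.node.get_closest_marker('extract')
    if marker is not None:
        # e.g. marker.kwargs == {'container_system': 'docker',
        #                        'extract_from': 'mirantis/virtlet', ...}
        for f in marker.kwargs.get('files_to_extract', []):
            print('would extract {0} from the container'.format(f))
```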
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index ca5c116..6399eb3 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -107,9 +107,7 @@
assert len(first_node_ips) > 0, "Couldn't find first k8s node IP!"
first_node_names = [name for name in underlay.node_names()
if name.startswith(first_node.name)]
- assert len(first_node_names) == 1, "Couldn't find first k8s node " \
- "hostname in SSH config!"
- first_node_name = first_node_names.pop()
+ first_node_name = first_node_names[0]
target_pod_ip = None
diff --git a/tcp_tests/tests/system/test_failover_k8s.py b/tcp_tests/tests/system/test_failover_k8s.py
index a334a42..b872c36 100644
--- a/tcp_tests/tests/system/test_failover_k8s.py
+++ b/tcp_tests/tests/system/test_failover_k8s.py
@@ -21,7 +21,7 @@
class TestFailoverK8s(object):
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_k8s_master_vip_migration(self, show_step, k8s_deployed, underlay,
k8s_actions, core_actions,
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index 8066cd9..ba5a81d 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -106,7 +106,11 @@
# todo (tleontovich) add asserts here and extend the tests
# with acceptance criteria
show_step(10)
+
# Run SL component tests
+ stacklight_deployed.setup_sl_functional_tests(
+ 'cfg01',
+ )
stacklight_deployed.run_sl_functional_tests(
'cfg01',
'/root/stacklight-pytest/stacklight_tests/',
@@ -163,6 +167,9 @@
stacklight_deployed.check_prometheus_targets(mon_nodes)
show_step(6)
# Run SL component tests
+ stacklight_deployed.setup_sl_functional_tests(
+ 'cfg01',
+ )
stacklight_deployed.run_sl_functional_tests(
'cfg01',
'/root/stacklight-pytest/stacklight_tests/',
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 69d8324..a34496b 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -16,6 +16,7 @@
from tcp_tests import logger
from tcp_tests import settings
+from tcp_tests.managers.jenkins.client import JenkinsClient
LOG = logger.logger
@@ -280,6 +281,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.offline_dpdk
def test_mcp_dpdk_ovs_install(self, underlay,
openstack_deployed,
openstack_actions,
@@ -296,6 +298,106 @@
tgt='*', fun='cmd.run',
args='service ntp stop; ntpd -gq; service ntp start')
if settings.RUN_TEMPEST:
- tempest_actions.prepare_and_run_tempest(dpdk=True)
+ tempest_actions.prepare_and_run_tempest()
LOG.info("*************** DONE **************")
+
+ @pytest.mark.fail_snapshot
+ @pytest.mark.offline_dpdk
+ def test_pipeline_deploy_os_dpdk(self, show_step,
+ underlay, config, salt_deployed,
+ tempest_actions,
+ openstack_actions):
+ """Deploy cid, deploys os with pipelines
+
+ Scenario:
+ 1. Prepare salt on hosts.
+ 2. Connect to jenkins on cfg01 node
+ 3. Run deploy on cfg01 node
+ 4. Connect to jenkins on cid node
+ 5. Run deploy DT on cid node
+ 6. Run deploy of os with DT
+ """
+ show_step(1)
+ nodes = underlay.node_names()
+ LOG.info("Nodes - {}".format(nodes))
+ show_step(2)
+ cfg_node_name = underlay.get_target_node_names(
+ target='cfg')[0]
+ salt_api = salt_deployed.get_pillar(
+ cfg_node_name, '_param:jenkins_salt_api_url')
+ salt_api = salt_api[0].get(cfg_node_name)
+ jenkins = JenkinsClient(
+ host='http://{}:8081'.format(config.salt.salt_master_host),
+ username='admin',
+ password='r00tme')
+ params = jenkins.make_defults_params('deploy_openstack')
+ params['SALT_MASTER_URL'] = salt_api
+ params['STACK_INSTALL'] = 'core,cicd'
+
+ show_step(3)
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+ assert result == 'SUCCESS', \
+ "Deploy core, cid failed {0}{1}".format(
+ jenkins.build_info(name=build[0], build_id=build[1]), result)
+
+ show_step(4)
+ cid_node = underlay.get_target_node_names(
+ target='cid01')[0]
+ salt_output = salt_deployed.get_pillar(
+ cid_node, 'jenkins:client:master:password')
+ cid_passwd = salt_output[0].get(cid_node)
+
+ pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
+ addresses = salt_deployed.get_pillar('cid01*', pillar)
+ ip = list(set([ip
+ for item in addresses
+ for node, ip in item.items() if ip]))
+        LOG.info('Jenkins IP is {}'.format(ip))
+ try:
+            assert len(ip) > 0, 'failed to find jenkins IP'
+ except AssertionError:
+ salt_deployed._salt.local(
+ tgt='cid*', fun='cmd.run',
+ args='service keepalived restart')
+ addresses = salt_deployed.get_pillar('cid01*', pillar)
+ ip = list(set([ip
+ for item in addresses
+ for node, ip in item.items() if ip]))
+            LOG.info('Jenkins IP is {}'.format(ip))
+            assert len(ip) > 0, 'failed to find jenkins IP {}'.format(addresses)
+
+ jenkins = JenkinsClient(
+ host='http://{}:8081'.format(ip[0]),
+ username='admin',
+ password=cid_passwd)
+ params['STACK_INSTALL'] = 'ovs,openstack'
+ params['SALT_MASTER_URL'] = 'http://{}:6969'.format(
+ config.salt.salt_master_host)
+ show_step(5)
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+ assert result == 'SUCCESS',\
+ "Deploy openstack was failed with results {0} {1}".format(
+ jenkins.build_info(name=build[0], build_id=build[1]),
+ result)
+
+ # Prepare resources before test
+ steps_path = config.openstack_deploy.openstack_resources_steps_path
+ commands = underlay.read_template(steps_path)
+ openstack_actions.install(commands)
+
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_queens.py b/tcp_tests/tests/system/test_install_mcp_queens.py
new file mode 100644
index 0000000..b644d26
--- /dev/null
+++ b/tcp_tests/tests/system/test_install_mcp_queens.py
@@ -0,0 +1,220 @@
+# Copyright 2018 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+class TestMcpInstallQueensCeph(object):
+ """Test class for testing mcp queens ceph deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr_ceph
+ def test_cookied_mcp_queens_dvr_ceph(self, underlay,
+ openstack_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensOvs(object):
+ """Test class for testing mcp queens ovs deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_ovs
+ def test_cookied_mcp_queens_ovs(self, underlay,
+ openstack_deployed,
+ stacklight_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_ovs
+ def test_cookied_mcp_queens_ovs_sl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run stacklight tests
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # Run SL component tests
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensDvr(object):
+ """Test class for testing mcp queens dvr deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr
+ def test_cookied_mcp_queens_dvr(self, underlay,
+ openstack_deployed,
+ stacklight_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr
+ def test_cookied_mcp_queens_dvr_sl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run stacklight tests
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # Run SL component tests
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensDvrSsl(object):
+ """Test class for testing mcp queens dvr deploy"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr_ssl
+ def test_cookied_mcp_queens_dvr_ssl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.cookied_mcp_queens_dvr_ssl
+ def test_cookied_mcp_queens_dvr_ssl_sl(self, underlay,
+ openstack_deployed,
+ stacklight_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run stacklight tests
+
+ """
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # Run SL component tests
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_opencontrail.py b/tcp_tests/tests/system/test_install_opencontrail.py
index efc37c8..63a60f2 100644
--- a/tcp_tests/tests/system/test_install_opencontrail.py
+++ b/tcp_tests/tests/system/test_install_opencontrail.py
@@ -52,14 +52,14 @@
if settings.RUN_SL_TESTS:
show_step(5)
stacklight_deployed.run_sl_functional_tests(
- 'ctl01',
+ 'cfg01',
'/root/stacklight-pytest/stacklight_tests/',
'tests/prometheus',
'test_alerts.py')
show_step(8)
# Download report
stacklight_deployed.download_sl_test_report(
- 'ctl01',
+ 'cfg01',
'/root/stacklight-pytest/stacklight_tests/report.xml')
LOG.info("*************** DONE **************")
@@ -121,5 +121,74 @@
stacklight_deployed.download_sl_test_report(
'ctl01',
'/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+ @pytest.mark.extract(container_system='docker', extract_from='conformance',
+ files_to_extract=['report'])
+ @pytest.mark.merge_xunit(path='/root/report',
+ output='/root/conformance_result.xml')
+ @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
+ 'conformance_result.xml'])
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_install_opencontrail4_k8s(self, config, show_step,
+ k8s_deployed, k8s_logs):
+ """Test for deploying MCP environment with k8s and check it
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Setup Kubernetes cluster
+            5. Run conformance if needed
+
+ """
+
+ if config.k8s.k8s_conformance_run:
+ show_step(5)
+ k8s_deployed.run_conformance(raise_on_err=False)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.extract(container_system='docker', extract_from='conformance',
+ files_to_extract=['report'])
+ @pytest.mark.merge_xunit(path='/root/report',
+ output='/root/conformance_result.xml')
+ @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
+ 'conformance_result.xml'])
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_install_opencontrail4_k8s_lma(self, config, show_step,
+ k8s_deployed,
+ stacklight_deployed,
+ k8s_logs):
+ """Test for deploying MCP environment with k8s and check it
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Setup Kubernetes cluster
+ 5. Check targets
+ 6. Check docker services
+ 7. Run SL tests
+ 8. Download SL report
+            9. Run conformance if needed
+ """
+        # Run SL component tests
+ if settings.RUN_SL_TESTS:
+ show_step(7)
+ stacklight_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+ show_step(8)
+ # Download report
+ stacklight_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+
+ if config.k8s.k8s_conformance_run:
+ show_step(9)
+ k8s_deployed.run_conformance(raise_on_err=False)
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 7f01cf5..6467a8a 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -16,11 +16,13 @@
import netaddr
import os
import json
+import requests
from tcp_tests import logger
from tcp_tests import settings
from tcp_tests.managers.k8s import read_yaml_file
+from tcp_tests.managers.jenkins.client import JenkinsClient
LOG = logger.logger
@@ -60,7 +62,7 @@
svc = deployment.expose()
show_step(4)
- hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
+ hostname = "test.{0}.".format(settings.DOMAIN_NAME)
svc.patch({
"metadata": {
"annotations": {
@@ -70,7 +72,9 @@
})
show_step(5)
- k8s_deployed.nslookup(hostname, svc.get_ip())
+ dns_svc = k8s_deployed.api.services.get(
+ name='coredns', namespace='kube-system')
+ k8s_deployed.nslookup(hostname, dns_svc.get_ip())
show_step(6)
deployment.delete()
@@ -91,7 +95,7 @@
show_step(1)
k8s_deployed.start_k8s_cncf_verification()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
k8s_chain_update_log_helper):
@@ -138,8 +142,9 @@
show_step(8)
sample.delete()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_metallb
def test_k8s_metallb(self, show_step, config, k8s_deployed):
"""Enable metallb in cluster and do basic tests
@@ -154,7 +159,7 @@
8. Delete deployments
"""
show_step(1)
- if not config.k8s_deploy.kubernetes_metallb_enabled:
+ if not k8s_deployed.is_metallb_enabled:
pytest.skip("Test requires metallb addon enabled")
show_step(2)
@@ -192,7 +197,7 @@
for sample in samples:
sample.delete()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.k8s_genie
def test_k8s_genie_flannel(self, show_step, config,
@@ -219,7 +224,7 @@
show_step(1)
# Find out calico and flannel networks
- tgt_k8s_control = "I@kubernetes:control:enabled:True"
+ tgt_k8s_control = "I@kubernetes:master"
flannel_pillar = salt_deployed.get_pillar(
tgt=tgt_k8s_control,
@@ -311,8 +316,9 @@
multicni_pod.delete()
nocni_pod.delete()
- @pytest.mark.grap_versions
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_dashboard
def test_k8s_dashboard(self, show_step, config,
salt_deployed, k8s_deployed):
"""Test dashboard setup
@@ -385,3 +391,113 @@
[ns.name for ns in k8s_deployed.api.namespaces.list()]
for namespace in dashboard_namespaces:
assert namespace['objectMeta']['name'] in namespaces_names_list
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.k8s_ingress_nginx
+ def test_k8s_ingress_nginx(self, show_step, config,
+ salt_deployed, k8s_deployed):
+ """Test ingress-nginx configured and working with metallb
+
+ Scenario:
+ 1. Setup Kubernetes cluster with metallb
+ 2. Create 2 example deployments and expose them
+ 3. Create ingress controller with 2 backends to each deployment
+ service respectively
+            4. Wait for the ingress to deploy
+ 5. Try to reach default endpoint
+ 6. Try to reach test1 and test2 deployment services endpoints
+ """
+ show_step(1)
+ if not k8s_deployed.is_metallb_enabled:
+ pytest.skip("Test requires metallb addon enabled")
+ if not k8s_deployed.is_ingress_nginx_enabled:
+ pytest.skip("Test requires ingress-nginx addon enabled")
+
+ show_step(2)
+ image = 'nginxdemos/hello:plain-text'
+ port = 80
+ dep1 = k8s_deployed.run_sample_deployment(
+ 'dep-ingress-1', image=image, port=port)
+ dep2 = k8s_deployed.run_sample_deployment(
+ 'dep-ingress-2', image=image, port=port)
+ svc1 = dep1.wait_ready().expose()
+ svc2 = dep2.wait_ready().expose()
+
+ show_step(3)
+ body = {
+ 'apiVersion': 'extensions/v1beta1',
+ 'kind': 'Ingress',
+ 'metadata': {'name': 'ingress-test'},
+ 'spec': {
+ 'rules': [{'http': {
+ 'paths': [{
+ 'backend': {
+ 'serviceName': svc1.name,
+ 'servicePort': port},
+ 'path': '/test1'}, {
+ 'backend': {
+ 'serviceName': svc2.name,
+ 'servicePort': port},
+ 'path': '/test2'
+ }]
+ }}]
+ }
+ }
+ ingress = k8s_deployed.api.ingresses.create(body=body)
+
+ show_step(4)
+ ingress.wait_ready()
+
+ show_step(5)
+ ingress_address = "https://{}".format(
+ ingress.read().status.load_balancer.ingress[0].ip)
+
+ assert requests.get(ingress_address, verify=False).status_code == 404
+
+ show_step(6)
+ req1 = requests.get(ingress_address + "/test1", verify=False)
+ assert req1.status_code == 200
+ assert 'dep-ingress-1' in req1.text
+
+ req2 = requests.get(ingress_address + "/test2", verify=False)
+ assert req2.status_code == 200
+ assert 'dep-ingress-2' in req2.text
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_k8s_cicd_upgrade(self, show_step, config,
+ salt_deployed, k8s_deployed):
+ """Test k8s upgrade cicd pipeline
+
+ Scenario:
+ 1. Setup Kubernetes+CICD cluster
+ 2. Start deploy-k8s-upgrade job in jenkins
+ 3. Wait for job to end
+ """
+ show_step(1)
+ jenkins_info = salt_deployed.get_pillar(
+ tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]
+
+ salt_api = salt_deployed.get_pillar(
+ tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]
+
+ show_step(2)
+ jenkins = JenkinsClient(
+ host='http://{host}:{port}'.format(**jenkins_info),
+ username=jenkins_info['username'],
+ password=jenkins_info['password'])
+
+ params = jenkins.make_defults_params('deploy-k8s-upgrade')
+ params['SALT_MASTER_URL'] = salt_api
+ params['SALT_MASTER_CREDENTIALS'] = 'salt'
+ params['CONFORMANCE_RUN_AFTER'] = True
+ params['CONFORMANCE_RUN_BEFORE'] = True
+ build = jenkins.run_build('deploy-k8s-upgrade', params)
+
+ show_step(3)
+ jenkins.wait_end_of_build(
+ name=build[0], build_id=build[1], timeout=3600 * 4)
+ result = jenkins.build_info(
+ name=build[0], build_id=build[1])['result']
+ assert result == 'SUCCESS', "k8s upgrade job has been failed"
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 44b82f0..6c083cb 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -90,11 +90,19 @@
LOG.info(f)
# show_step(8)
- nodes_amount = len(hardware.slave_nodes)
- cmd = """ timeout 1800s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq {amount} ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """.format(amount=nodes_amount) # noqa
+ # nodes_amount = len(hardware.slave_nodes)
+ # cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq {amount} ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """.format(amount=nodes_amount) # noqa
+ cmd = """salt-call state.sls maas.machines.wait_for_ready"""
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
underlay.check_call(node_name=cfg_node, verbose=verbose,
cmd='salt-key')
+
+ r, f = day1_cfg_config.salt.enforce_state(
+ 'cfg01*',
+ 'maas.machines.assign_ip')
+ LOG.info(r)
+ LOG.info(f)
+
# show_step(9)
underlay.check_call(
node_name=cfg_node, verbose=verbose,
@@ -220,24 +228,24 @@
cmd='salt "*" ssh.set_auth_key ubuntu '
'"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
- underlay.check_call(
- node_name=cfg_node,
- verbose=verbose,
- cmd='salt-call state.sls maas.region')
- underlay.check_call(
- node_name=cfg_node,
- verbose=verbose,
- cmd='maas logout mirantis && '
- 'maas login mirantis '
- 'http://localhost:5240/MAAS/api/2.0/ '
- 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN' # noqa
- )
+ # underlay.check_call(
+ # node_name=cfg_node,
+ # verbose=verbose,
+ # cmd='salt-call state.sls maas.region')
+ # underlay.check_call(
+ # node_name=cfg_node,
+ # verbose=verbose,
+ # cmd='maas logout mirantis && '
+ # 'maas login mirantis '
+ # 'http://localhost:5240/MAAS/api/2.0/ '
+ # 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN' # noqa
+ # )
- underlay.check_call(
- node_name=cfg_node,
- verbose=verbose,
- cmd="maas mirantis maas set-config "
- "name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'")
+ # underlay.check_call(
+ # node_name=cfg_node,
+ # verbose=verbose,
+ # cmd="maas mirantis maas set-config "
+ # "name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'")
# underlay.check_call(
# node_name=cfg_node,
@@ -247,13 +255,13 @@
# "subnet=$(maas mirantis subnets read | jq '.[] | "
# "select(.name==\"10.10.0.0/16\") | .id')")
- underlay.check_call(
- node_name=cfg_node,
- verbose=verbose,
- cmd="maas mirantis vlan update "
- "$(maas mirantis subnets read | jq '.[] | "
- "select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') "
- "0 dhcp_on=True primary_rack='cfg01'")
+ # underlay.check_call(
+ # node_name=cfg_node,
+ # verbose=verbose,
+ # cmd="maas mirantis vlan update "
+ # "$(maas mirantis subnets read | jq '.[] | "
+ # "select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') "
+ # "0 dhcp_on=True primary_rack='cfg01'")
underlay.check_call(
node_name=cfg_node,
@@ -278,7 +286,8 @@
verbose=verbose,
cmd='salt-call state.sls maas.machines')
show_step(5)
- cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
+ # cmd = """ timeout 1200s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
+ cmd = """salt-call state.sls maas.machines.wait_for_ready"""
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
underlay.check_call(
node_name=cfg_node, verbose=verbose, cmd='salt-key')
@@ -318,7 +327,7 @@
node_name=cfg_node, verbose=verbose, cmd="reclass-salt --top")
cmd = "salt -C " \
- "'I@salt:control or I@nova:compute or I@neutron:gateway' " \
+ "'I@salt:control or I@nova:compute or I@ceph:osd' " \
"cmd.run 'touch /run/is_rebooted'"
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
@@ -333,25 +342,25 @@
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
cmd = "salt --async -C " \
- "'I@neutron:gateway' cmd.run 'salt-call state.sls " \
+ "'I@ceph:osd' cmd.run 'salt-call state.sls " \
"linux.system.user,openssh,linux.network;reboot'"
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
time.sleep(360) # TODO: Add ssh waiter
cmd = "salt -C " \
- "'I@salt:control or I@nova:compute or I@neutron:gateway'" \
+ "'I@salt:control or I@nova:compute or I@ceph:osd'" \
" test.ping"
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
cmd = """salt -C """ \
- """'I@salt:control or I@nova:compute or I@neutron:gateway' """ \
+ """'I@salt:control or I@nova:compute or I@ceph:osd' """ \
"""cmd.run '[ -f "/run/is_rebooted" ] && """ \
"""echo "Has not been rebooted!" || echo "Rebooted"' """
ret = underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
count = Counter(ret['stdout_str'].split())
- assert count['Rebooted'] == 10, "Should be rebooted 10 baremetal nodes"
+        assert count['Rebooted'] == 13, "13 baremetal nodes should have been rebooted"
underlay.check_call(
node_name=cfg_node,
@@ -383,10 +392,12 @@
password='r00tme')
params = jenkins.make_defults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
+ params['STACK_INSTALL'] = \
+ 'core,kvm,ceph,cicd,openstack,stacklight,finalize'
build = jenkins.run_build('deploy_openstack', params)
jenkins.wait_end_of_build(
- name=build[0], build_id=build[1], timeout=60 * 60 * 2)
+ name=build[0], build_id=build[1], timeout=60 * 60 * 4)
with open("{path}/cfg01_jenkins_deploy_openstack_console.log".format(
path=settings.LOGS_DIR), 'w') as f:
@@ -419,13 +430,7 @@
cmd='salt "*" ssh.set_auth_key ubuntu '
'"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
- salt_nodes = salt_deployed.get_ssh_data()
- nodes_list = \
- [node for node in salt_nodes
- if not any(node['node_name'] == n['node_name']
- for n in config.underlay.ssh)]
- config.underlay.ssh = config.underlay.ssh + nodes_list
- underlay.add_config_ssh(nodes_list)
+ salt_deployed.update_ssh_data_from_minions()
time.sleep(120) # debug sleep
cmd = "salt '*' test.ping"
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 83fd33a..d3b6c27 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -25,6 +25,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_virtlet
def test_virtlet_create_delete_vm(self, show_step, config, k8s_deployed):
"""Test for deploying an mcp environment with virtlet
@@ -51,6 +52,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.k8s_virtlet
def test_vm_resource_quotas(self, show_step, config, k8s_deployed):
"""Test for deploying a VM with specific quotas
diff --git a/tcp_tests/tests/unit/test_yaml_templates.py b/tcp_tests/tests/unit/test_yaml_templates.py
index 54eb5b3..a9f10fb 100644
--- a/tcp_tests/tests/unit/test_yaml_templates.py
+++ b/tcp_tests/tests/unit/test_yaml_templates.py
@@ -29,6 +29,16 @@
"tenant-pool01": "10.80.0.0/24",
"external-pool01": "10.90.0.0/24"
}
+config.underlay.dhcp_ranges = {
+ "admin-pool01": {"cidr": "10.70.0.0/24",
+ "start": "10.70.0.10",
+ "end": "10.70.0.200",
+ "gateway": "10.70.0.1"},
+ "external-pool01": {"cidr": "10.90.0.0/24",
+ "start": "10.90.0.10",
+ "end": "10.90.0.200",
+ "gateway": "10.90.0.1"},
+}
config.underlay.ssh_keys = [
{"public": "AAAARRRGGHHHhh", "private": "--- BLABLA-KEY ---"}
]
diff --git a/tcp_tests/utils/get_jenkins_job_stages.py b/tcp_tests/utils/get_jenkins_job_stages.py
index 143e1a2..883494f 100755
--- a/tcp_tests/utils/get_jenkins_job_stages.py
+++ b/tcp_tests/utils/get_jenkins_job_stages.py
@@ -15,6 +15,7 @@
import argparse
import os
import sys
+import time
sys.path.append(os.getcwd())
try:
@@ -107,10 +108,15 @@
for line in log["text"].splitlines()))
return res
- wf = jenkins.get_workflow(opts.job_name, opts.build_number)
- info = jenkins.build_info(opts.job_name, int(wf['id']))
+ for _ in range(3):
+ wf = jenkins.get_workflow(opts.job_name, opts.build_number)
+ info = jenkins.build_info(opts.job_name, int(wf['id']))
+ if info.get('result'):
+ break
+ time.sleep(3)
+
build_description = ("[" + info['fullDisplayName'] + "] " +
- info['url'] + " : " + info['result'])
+ info['url'] + " : " + (info['result'] or 'No result'))
stages = get_stages(wf['stages'], 0)
if not stages:
msg = wf['status'] + ":\n\n"
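The retry loop added above papers over the window where Jenkins has finished streaming the workflow but `build_info` does not yet carry a `result`. The same poll-until-populated pattern as a generic helper; illustrative only, not the utility's API:
```
import time

def poll_for_result(fetch, attempts=3, interval=3):
    """Re-run fetch() until its dict carries a truthy 'result' key."""
    info = fetch()
    for _ in range(attempts - 1):
        if info.get('result'):
            break
        time.sleep(interval)
        info = fetch()
    return info
```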
diff --git a/tcp_tests/utils/get_logs.py b/tcp_tests/utils/get_logs.py
new file mode 100755
index 0000000..225f9d7
--- /dev/null
+++ b/tcp_tests/utils/get_logs.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+try:
+ from tcp_tests.fixtures import config_fixtures
+ from tcp_tests.managers import underlay_ssh_manager
+except ImportError:
+ print("ImportError: Run the application from the tcp-qa directory or "
+ "set the PYTHONPATH environment variable to directory which contains"
+ " ./tcp_tests")
+ sys.exit(1)
+
+
+def load_params():
+ """
+ Parse CLI arguments and environment variables
+
+ Returns: ArgumentParser instance
+ """
+ parser = argparse.ArgumentParser(description=(
+ 'Download logs and debug info from salt minions'
+ ))
+ default_name_prefix = 'logs_' + time.strftime("%Y%m%d_%H%M%S")
+ parser.add_argument('--archive-name-prefix',
+                        help='Custom prefix for the generated archive name',
+ default=default_name_prefix,
+ type=str)
+ return parser
+
+
+def main():
+ parser = load_params()
+ opts = parser.parse_args()
+
+ tests_configs = os.environ.get('TESTS_CONFIGS', None)
+ if not tests_configs or not os.path.isfile(tests_configs):
+ print("Download logs and debug info from salt minions. "
+ "Please set TESTS_CONFIGS environment variable whith"
+ "the path to INI file with lab metadata.")
+ return 11
+
+ config = config_fixtures.config()
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+ underlay.get_logs(opts.archive_name_prefix)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
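get_logs.py expects lab metadata via the TESTS_CONFIGS environment variable rather than a CLI flag. One way to drive it from Python, with a placeholder config path (assumption, adjust to your lab):
```
import os
import subprocess

env = dict(os.environ,
           TESTS_CONFIGS='/path/to/lab_metadata.ini')  # placeholder path
subprocess.check_call(
    ['python', 'tcp_tests/utils/get_logs.py',
     '--archive-name-prefix', 'diag_logs'],
    env=env)
```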
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index 00ebec6..acc2e9f 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -131,13 +131,18 @@
if opts.verbose:
print_build_header(build, job_params, opts)
- jenkins.wait_end_of_build(
- name=build[0],
- build_id=build[1],
- timeout=opts.build_timeout,
- interval=1,
- verbose=opts.verbose,
- job_output_prefix=opts.job_output_prefix)
+ try:
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=opts.build_timeout,
+ interval=1,
+ verbose=opts.verbose,
+ job_output_prefix=opts.job_output_prefix)
+ except Exception as e:
+ print(str(e))
+ raise
+
result = jenkins.build_info(name=build[0],
build_id=build[1])['result']
if opts.verbose: