Merge "Add test_run_cvp_stacklight"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index de92a25..e8e88c2 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -7,10 +7,12 @@
if (env_manager == 'devops') {
jenkins_slave_node_name = "${NODE_NAME}"
+ node_with_reports = "${NODE_NAME}"
make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
} else if (env_manager == 'heat') {
jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
make_snapshot_stages = false
+ node_with_reports = jenkins_slave_node_name
}
currentBuild.description = "${NODE_NAME}:${ENV_NAME}<br>"
@@ -139,9 +141,15 @@
// then archive artifacts also on that node
if (jenkins_slave_node_name != env.NODE_NAME) {
node ("${jenkins_slave_node_name}") {
- stage("Archive all xml reports from node ${}") {
+ stage("Archive all xml reports from node ${jenkins_slave_node_name}") {
archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
}
+ if ("${env.REPORT_TO_TESTRAIL}" != "false") {
+ stage("report results to testrail") {
+ common.printMsg("Running on: " + node_with_reports, "blue")
+ shared.swarm_testrail_report(steps, node_with_reports)
+ }
+ }
}
}
}
@@ -150,12 +158,21 @@
archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
}
if ("${env.REPORT_TO_TESTRAIL}" != "false") {
- stage("report results to testrail") {
- shared.swarm_testrail_report(steps)
+ stage("report results to testrail from jenkins master") {
+ common.printMsg("Running on: " + node_with_reports, "blue")
+ common.printMsg("Running on: " + env.NODE_NAME, "blue")
+ shared.verbose_sh("""\
+ [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv /home/jenkins/venv_testrail_reporter""", true, false, true)
+ shared.run_cmd("""\
+ . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U""")
+ shared.swarm_testrail_report(steps, env.NODE_NAME)
}
stage("Store TestRail reports to job description") {
- def String description = readFile("description.txt")
- currentBuild.description += "${description}"
+ if (fileExists("jenkins_agent_description.txt")){
+
+ def String jenkins_agent_description = readFile("jenkins_agent_description.txt")
+ currentBuild.description += "${jenkins_agent_description}"
+ }
}
}
} // try
diff --git a/jobs/pipelines/packer-image-create.groovy b/jobs/pipelines/packer-image-create.groovy
new file mode 100644
index 0000000..94133a7
--- /dev/null
+++ b/jobs/pipelines/packer-image-create.groovy
@@ -0,0 +1,114 @@
+/**
+ *
+ * Build a VM image with Packer, then upload it to Glance or store it as a job artifact
+ *
+ * Expected parameters:
+
+ * IMAGE_NAME Name of the resulting image in the Glance or in artifacts
+
+ * BUILD_CONFIG_DRIVE_PATH Relative path in tcp-qa to the directory with meta-data and user-data files
+ * BUILD_PACKER_CONFIG_PATH Relative path in tcp-qa to the file with packer config (packer.json)
+ * BASE_IMAGE_URL Base image to build a new image, in qcow2. For example, released ubuntu cloud image
+ * BASE_IMAGE_MD5 Base image MD5 checksum. The image will be re-downloaded if it does not match the local image checksum
+
+ * PACKER_URL URL to the zip archive with packer binary, see https://releases.hashicorp.com/packer/
+ * PACKER_ZIP_MD5 MD5 of the zip archive with packer binary
+
+ * OS_AUTH_URL OpenStack keystone catalog URL
+ * OS_PROJECT_NAME OpenStack project (tenant) name
+ * OS_USER_DOMAIN_NAME OpenStack user domain name
+ * OS_CREDENTIALS OpenStack username and password credentials ID in Jenkins
+ * UPLOAD_IMAGE_TO_GLANCE If True: upload image to glance; if False: store as an artifact
+
+ * TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ */
+
+@Library('tcp-qa')_
+
+def common = new com.mirantis.mk.Common()
+def shared = new com.mirantis.system_qa.SharedPipeline()
+
+timeout(time: 6, unit: 'HOURS') {
+ node () {
+ try {
+
+ stage("Clean the environment and clone tcp-qa") {
+ deleteDir()
+ shared.run_cmd("""\
+ git clone https://github.com/Mirantis/tcp-qa.git ${WORKSPACE}
+ """)
+ shared.update_working_dir(false)
+ sh "mkdir ./tmp"
+ }
+
+ def packer_zipname = "/tmp/packer.zip"
+ def configdrive_isoname = "./tmp/config-drive.iso"
+
+ stage("Prepare Packer") {
+ // Check that the archive is already downloaded and has a correct checksum. Remove it if the checksum does not match
+ if (fileExists(packer_zipname)) {
+ sh(script: "bash -cex 'md5sum -c --status <(echo ${PACKER_ZIP_MD5} ${packer_zipname})' || rm -f ${packer_zipname}", returnStdout: true)
+ }
+ // If the file is missing or removed, then download it and check the checksum
+ if (!fileExists(packer_zipname)) {
+ sh(script: "wget --quiet -O ${packer_zipname} ${PACKER_URL}", returnStdout: true)
+ // Should fail the job if the checksum does not match
+ sh(script: "bash -cex 'md5sum -c --status <(echo ${PACKER_ZIP_MD5} ${packer_zipname})'", returnStdout: true)
+ }
+ sh "unzip ${packer_zipname}"
+ }
+
+ stage("Build the cloudinit ISO") {
+ // Check that genisoimage is installed, or try to install it
+ sh "which genisoimage || sudo apt-get -y install genisoimage"
+ // Generate config-drive ISO
+ sh "mkisofs -o ${configdrive_isoname} -V cidata -r -J --quiet ${BUILD_CONFIG_DRIVE_PATH}"
+ }
+
+ stage("Build the image '${IMAGE_NAME}'") {
+ // Build the image
+ sh (script: """\
+ set -ex;
+ export PACKER_LOG=1;
+ export PACKER_CACHE_DIR='/tmp/packer_cache_${IMAGE_NAME}/';
+ mkdir -p \${PACKER_CACHE_DIR};
+ ./packer build -machine-readable -parallel=false -only='qemu' ${BUILD_PACKER_CONFIG_PATH};
+ """, returnStdout: true)
+ }
+
+
+ if (env.UPLOAD_IMAGE_TO_GLANCE) {
+
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : env.OS_CREDENTIALS,
+ passwordVariable: 'OS_PASSWORD',
+ usernameVariable: 'OS_USERNAME']
+ ]) {
+ env.OS_IDENTITY_API_VERSION = 3
+
+ def imagePath = "tmp/${IMAGE_NAME}/${IMAGE_NAME}.qcow2"
+ shared.run_cmd("""\
+ openstack --insecure image delete ${IMAGE_NAME} || true
+ sleep 3
+ openstack --insecure image create ${IMAGE_NAME} --file ${imagePath} --disk-format qcow2 --container-format bare
+ """)
+ }
+ }
+ } else {
+
+ stage("Archive artifacts") {
+ archiveArtifacts artifacts: "tmp/${IMAGE_NAME}/${IMAGE_NAME}.qcow2"
+ }
+ }
+
+ } catch (e) {
+ common.printMsg("Job is failed", "purple")
+ throw e
+ } finally {
+ // Remove the image after job is finished
+ sh "rm -f ./tmp/${IMAGE_NAME}.qcow2 || true"
+ } // try
+ } // node
+} // timeout
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 802a0a5..86b5122 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -21,6 +21,7 @@
* SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
* MCP_SALT_REPO_URL Base URL for MCP repositories required to bootstrap cfg01 node. Leave blank to use default
* (http://mirror.mirantis.com/ from mcp-common-scripts)
+ * JENKINS_PIPELINE_BRANCH Should be set to release/proposed/2019.2.0 when testing a non-released version
* MCP_SALT_REPO_KEY URL of the key file. Leave blank to use default
* (${MCP_SALT_REPO_URL}/${MCP_VERSION}/salt-formulas/xenial/archive-salt-formulas.key from mcp-common-scripts)
*
@@ -69,6 +70,7 @@
export ENV_MANAGER=devops
export PYTHONIOENCODING=UTF-8
export REPOSITORY_SUITE=${MCP_VERSION}
+ export UPDATE_VERSION=${UPDATE_VERSION}
export TEST_GROUP=test_create_environment
export LOG_NAME=swarm_test_create_environment.log
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
@@ -122,6 +124,7 @@
export BOOTSTRAP_TIMEOUT=1800
export PYTHONIOENCODING=UTF-8
export REPOSITORY_SUITE=${MCP_VERSION}
+ export UPDATE_VERSION=${UPDATE_VERSION}
export TEST_GROUP=test_bootstrap_salt
export LOG_NAME=swarm_test_bootstrap_salt.log
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index 8c10291..0d497ab 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -27,6 +27,8 @@
* OS_PROJECT_NAME OpenStack project (tenant) name
* OS_USER_DOMAIN_NAME OpenStack user domain name
* OS_CREDENTIALS OpenStack username and password credentials ID in Jenkins
+ * JENKINS_PIPELINE_BRANCH Should be set to release/proposed/2019.2.0 when testing a non-released version
+ * UPDATE_VERSION Version of update to deploy
* LAB_PARAM_DEFAULTS Filename placed in tcp_tests/templates/_heat_environments, with default parameters for the heat template
*
* CREATE_JENKINS_NODE_CREDENTIALS Jenkins username and password with rights to add/delete Jenkins agents
@@ -154,6 +156,7 @@
export SHUTDOWN_ENV_ON_TEARDOWN=false
export PYTHONIOENCODING=UTF-8
export REPOSITORY_SUITE=${MCP_VERSION}
+ export UPDATE_VERSION=${UPDATE_VERSION}
export ENV_NAME=${ENV_NAME}
export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
@@ -254,6 +257,12 @@
stage("Clean the environment and clone tcp-qa") {
deleteDir()
shared.verbose_sh("""\
+ [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv /home/jenkins/venv_testrail_reporter
+ """, true, false, true)
+ shared.run_cmd("""\
+ . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U
+ """)
+ shared.verbose_sh("""\
[ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
""", true, false, true)
shared.run_cmd("""\
@@ -283,6 +292,7 @@
export BOOTSTRAP_TIMEOUT=3600
export PYTHONIOENCODING=UTF-8
export REPOSITORY_SUITE=${MCP_VERSION}
+ export UPDATE_VERSION=${UPDATE_VERSION}
export TEST_GROUP=test_bootstrap_salt
export LOG_NAME=swarm_test_bootstrap_salt.log
py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
diff --git a/jobs/pipelines/swarm-create-cfg-config-drive.groovy b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
index 2b78cc3..86b9eec 100644
--- a/jobs/pipelines/swarm-create-cfg-config-drive.groovy
+++ b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
@@ -20,6 +20,8 @@
// smc['LOCAL_REPOS'] = 'true'
smc['MCP_SALT_REPO_KEY'] = "${MCP_SALT_REPO_KEY}"
smc['MCP_SALT_REPO_URL'] = "${MCP_SALT_REPO_URL}"
+smc['MCP_SALT_REPO_UPDATES'] = "${MCP_SALT_REPO_UPDATES}"
+
def entries(m) {
m.collect {k, v -> [k, v]}
@@ -95,6 +97,7 @@
""")
}
+
}
}
@@ -185,6 +188,8 @@
# export LOCAL_REPOS="true"
# export MCP_SALT_REPO_KEY="${MCP_SALT_REPO_KEY}"
# export MCP_SALT_REPO_URL="${MCP_SALT_REPO_URL}"
+# export MCP_SALT_REPO_UPDATES="${MCP_SALT_REPO_UPDATES}"
+# export ENABLE_MCP_SALT_REPO_UPDATES="true"
output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 0183016..dd2df11 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -52,6 +52,12 @@
stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
}
+ stage("Create env_jenkins_cicd and env_k8s files") {
+ shared.run_cmd("""\
+ export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+ python ./tcp_tests/utils/create_env_jenkins_cicd.py
+ """)
+ }
for (stack in "${env.STACK_INSTALL}".split(",")) {
stage("Sanity check the deployed component [${stack}]") {
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index d403861..fdcb2c9 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -67,7 +67,7 @@
shared.run_sh(sources + installed + """
export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
- export ENV_MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
+ export ENV_MANAGER=$ENV_MANAGER # use 'hardware' fixture to manage fuel-devops environment
export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
export salt_master_port=6969
export SALT_USER=\$SALTAPI_USER
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index d74b600..300aad5 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -185,25 +185,30 @@
""")
}
-def update_working_dir() {
+def update_working_dir(Boolean updateRequirements=true) {
// Use to fetch a patchset from gerrit to the working dir
run_cmd("""\
if [ -n "$TCP_QA_REFS" ]; then
set -e
git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
- fi
- pip install -r tcp_tests/requirements.txt
- """)
+ fi""")
+ if (updateRequirements) {
+ run_cmd("""\
+ pip install -r tcp_tests/requirements.txt
+ """)
+ }
}
def swarm_bootstrap_salt_cluster_devops() {
def common = new com.mirantis.mk.Common()
def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: is_released_version(env.MCP_VERSION) ? "release/${env.MCP_VERSION}" : 'master'
def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: "release/${env.MCP_VERSION}"
+ def jenkins_pipelines_branch = env.JENKINS_PIPELINE_BRANCH ?: ''
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
+ def mcp_common_scripts_refs = env.MCP_COMMON_SCRIPTS_REFS ?: ''
def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
def mcp_salt_repo_url = env.MCP_SALT_REPO_URL ?: ''
def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
@@ -213,6 +218,7 @@
def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
+ def update_version = env.UPDATE_VERSION ?: ''
def parameters = [
string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
string(name: 'PARENT_WORKSPACE', value: pwd()),
@@ -237,6 +243,9 @@
string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
string(name: 'LAB_CONTROL_IFACE', value: env_lab_ctl_iface),
string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
+ string(name: 'JENKINS_PIPELINE_BRANCH', value: "${jenkins_pipelines_branch}"),
+ string(name: 'MCP_COMMON_SCRIPTS_REFS', value: "${mcp_common_scripts_refs}"),
+ string(name: 'UPDATE_VERSION', value: "${update_version}"),
booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
]
@@ -248,8 +257,10 @@
def common = new com.mirantis.mk.Common()
def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: "release/${env.MCP_VERSION}"
def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: "release/${env.MCP_VERSION}"
+ def mcp_common_scripts_refs = env.MCP_COMMON_SCRIPTS_REFS ?: ''
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
+ def jenkins_pipelines_branch = env.JENKINS_PIPELINE_BRANCH ?: ''
def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
@@ -260,6 +271,7 @@
def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
+ def update_version = env.UPDATE_VERSION ?: ''
def parameters = [
string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
string(name: 'JENKINS_SLAVE_NODE_NAME', value: jenkins_slave_node_name),
@@ -279,6 +291,8 @@
string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
string(name: 'MCP_SALT_REPO_URL', value: "${mcp_salt_repo_url}"),
string(name: 'MCP_SALT_REPO_KEY', value: "${mcp_salt_repo_key}"),
+ string(name: 'MCP_COMMON_SCRIPTS_REFS', value: "${mcp_common_scripts_refs}"),
+ string(name: 'UPDATE_VERSION', value: "${update_version}"),
string(name: 'IPMI_USER', value: env_ipmi_user),
string(name: 'IPMI_PASS', value: env_ipmi_pass),
string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
@@ -289,6 +303,7 @@
string(name: 'OS_USER_DOMAIN_NAME', value: "${OS_USER_DOMAIN_NAME}"),
string(name: 'OS_CREDENTIALS', value: "${OS_CREDENTIALS}"),
string(name: 'LAB_PARAM_DEFAULTS', value: "${LAB_PARAM_DEFAULTS}"),
+ string(name: 'JENKINS_PIPELINE_BRANCH', value: "${jenkins_pipelines_branch}"),
booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
]
@@ -375,7 +390,7 @@
parameters: parameters
}
-def swarm_testrail_report(String passed_steps) {
+def swarm_testrail_report(String passed_steps, String node_with_reports) {
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -386,7 +401,7 @@
string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
string(name: 'PASSED_STEPS', value: passed_steps),
- string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'PARENT_NODE_NAME', value: node_with_reports),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
string(name: 'TEMPEST_TEST_SUITE_NAME', value: "${tempest_test_suite_name}"),
@@ -410,7 +425,9 @@
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
+ def jenkins_pipelines_branch=env.JENKINS_PIPELINE_BRANCH ?: ''
def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
+ def update_version = env.UPDATE_VERSION ?: ''
def parameters = [
string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
@@ -429,7 +446,10 @@
string(name: 'IPMI_USER', value: env.IPMI_USER),
string(name: 'IPMI_PASS', value: env.IPMI_PASS),
string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
+ string(name: 'JENKINS_PIPELINE_BRANCH', value: "${jenkins_pipelines_branch}"),
string(name: 'IMAGE_PATH_CFG01_DAY01', value: env.IMAGE_PATH_CFG01_DAY01),
+ string(name: 'UPDATE_VERSION', value: "${update_version}"),
+
]
build_shell_job('swarm-cookied-model-generator', parameters, "deploy_generate_model.xml")
@@ -441,15 +461,17 @@
println("ADMIN_NETWORK_GW=" + ADMIN_NETWORK_GW)
def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
+ def mcp_common_scripts_ref = env.MCP_COMMON_SCRIPTS_REFS ?: ''
def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def update_version = env.UPDATE_VERSION?: 'proposed'
def mcp_salt_repo_url = env.MCP_SALT_REPO_URL ?: ''
def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
def deploy_network_mask = env.DEPLOY_NETWORK_NETMASK ?: ''
def parameters = [
string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
- string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
+ string(name: 'MODEL_URL', value: "http://172.19.112.216:8098/${LAB_CONFIG_NAME}.git"),
string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
@@ -466,6 +488,9 @@
string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+ string(name: 'UPDATE_VERSION', value: "${update_version}"),
+ string(name: 'MCP_COMMON_SCRIPTS_REFS', value: "${mcp_common_scripts_ref}"),
+ string(name: 'MCP_SALT_REPO_UPDATES', value: "'deb [arch=amd64] http://mirror.mirantis.com/update/${UPDATE_VERSION}/salt-formulas/xenial xenial main'"),
]
build_pipeline_job('swarm-create-cfg-config-drive', parameters)
}
@@ -643,7 +668,7 @@
def testPlanDesc = env.LAB_CONFIG_NAME
def testrailURL = "https://mirantis.testrail.com"
def testrailProject = "Mirantis Cloud Platform"
- def testPlanNamePrefix = env.TEST_PLAN_NAME_PREFIX ?: "[MCP-Q2]System"
+ def testPlanNamePrefix = env.TEST_PLAN_NAME_PREFIX ?: "[2019.2.0-update]System"
def testPlanName = "${testPlanNamePrefix}-${MCP_VERSION}-${new Date().format('yyyy-MM-dd')}"
def testrailMilestone = "MCP1.1"
def testrailCaseMaxNameLenght = 250
diff --git a/tcp_tests/helpers/env_config.py b/tcp_tests/helpers/env_config.py
index 5dbc87d..b811030 100644
--- a/tcp_tests/helpers/env_config.py
+++ b/tcp_tests/helpers/env_config.py
@@ -150,7 +150,7 @@
:param path: string
:returns: key string, indexes list
"""
- pattern = re.compile("\[([0-9]*)\]")
+ pattern = re.compile(r"\[([0-9]*)\]")
# find all indexes of possible list object in path
indexes = (lambda x: [int(r) for r in pattern.findall(x)]
if pattern.search(x) else [])
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 0e57c89..5f5f4e8 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -252,22 +252,22 @@
if self.__config.k8s.run_extended_virtlet_conformance:
ci_image = "cloud-images.ubuntu.com/xenial/current/" \
"xenial-server-cloudimg-amd64-disk1.img"
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -junitOutput {3} "
- "-image {2} -sshuser ubuntu -memoryLimit 1024 "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
+ cmd = (r"set -o pipefail; "
+ r"docker run --net=host {0} /virtlet-e2e-tests "
+ r"-include-cloud-init-tests -junitOutput {3} "
+ r"-image {2} -sshuser ubuntu -memoryLimit 1024 "
+ r"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ r"-ginkgo.focus '\[Conformance\]' "
+ r"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
log_file, ci_image, report_name))
else:
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-junitOutput {2} "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
+ cmd = (r"set -o pipefail; "
+ r"docker run --net=host {0} /virtlet-e2e-tests "
+ r"-junitOutput {2} "
+ r"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ r"-ginkgo.focus '\[Conformance\]' "
+ r"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
log_file, report_name))
LOG.info("Executing: {}".format(cmd))
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index bc71427..eb646d1 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -123,8 +123,17 @@
barbican_integration = self.__salt_api.get_single_pillar(
tgt="ctl01*",
pillar="_param:barbican_integration_enabled")
+ if self.__salt_api.local('I@opencontrail:compute:enabled:true',
+ 'match.pillar',
+ 'opencontrail:compute:enabled:true'
+ ).get('return', [{}]) != [{}]:
+ contrail_integration = True
+ else:
+ contrail_integration = False
LOG.info("Barbican integration {0}".format(barbican_integration))
+ LOG.info("Opencontrail integration {0}".format(contrail_integration))
+
commands = [
{
'description': ("Install docker-ce package and "
@@ -147,6 +156,62 @@
"runtest.orchestrate.tempest")},
]
+ if contrail_integration:
+ vsrx_router = self.__salt_api.get_single_pillar(
+ tgt="I@opencontrail:control:role:primary",
+ pillar="_param:opencontrail_router01_address")
+ public_network = "192.168.200.0"
+ contrail_commands = [
+ {
+ 'description': "Iproute to vsrx router",
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex; ip route replace " +
+ public_network + "/24 via " + vsrx_router)},
+ {
+ 'description': "Align security group: remove all rules",
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ "salt 'ctl01*' cmd.run '. /root/keystonercv3; " +
+ "openstack security group rule list --column ID " +
+ "-f value | xargs " +
+ "openstack security group rule delete|true';")},
+ {
+ 'description': "Align security group: remove all default",
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ " salt ctl01* cmd.run '. /root/keystonercv3; " +
+ "openstack security group " +
+ "list --column ID --column Name -f value|" +
+ "grep default|cut -d \" \" -f 1|" +
+ "xargs openstack security group delete|true'")},
+ {
+ 'description': "Align security group: add rules",
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ "salt 'ctl01*' cmd.run '. /root/keystonercv3; " +
+ "openstack security group rule create default " +
+ "--egress --protocol tcp'; " +
+ "salt 'ctl01*' cmd.run '. /root/keystonercv3; " +
+ "openstack security group rule create default " +
+ "--ingress --protocol tcp'; " +
+ "salt 'ctl01*' cmd.run '. /root/keystonercv3; " +
+ "openstack security group rule create default " +
+ "--egress --protocol icmp'; " +
+ "salt 'ctl01*' cmd.run '. /root/keystonercv3; " +
+ "openstack security group rule create default " +
+ "--ingress --protocol icmp'; ")},
+ {
+ 'description': "Create public network with target",
+ 'node_name': self.target_name,
+ 'cmd': ("set -ex;" +
+ "salt -C 'I@opencontrail:control:role:primary' " +
+ "contrail.virtual_network_create public " +
+ "'{\"external\":true,\"ip_prefix\":\"" +
+ public_network + "\",\"ip_prefix_len\":24," +
+ "\"asn\":64512,\"target\":10000}'")},
+ ]
+ commands = contrail_commands + commands
+
if barbican_integration:
commands.append({
'description': "Configure barbican",
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 68fb5e6..a33bb11 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -63,8 +63,8 @@
label="Install and configure salt")
self.create_env_salt()
self.create_env_jenkins_day01()
- self.create_env_jenkins_cicd()
- self.create_env_k8s()
+ # self.create_env_jenkins_cicd()
+ # self.create_env_k8s()
def change_creds(self, username, password):
self.__user = username
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 6b5bebb..c2e82d4 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -414,38 +414,40 @@
# Prefix each '$' symbol with backslash '\' to disable
# early interpolation of environment variables on cfg01 node only
dump_commands = (
- "mkdir /root/\$(hostname -f)/;"
- "rsync -aruv /var/log/ /root/\$(hostname -f)/;"
- "dpkg -l > /root/\$(hostname -f)/dump_dpkg_l.txt;"
- "df -h > /root/\$(hostname -f)/dump_df.txt;"
- "mount > /root/\$(hostname -f)/dump_mount.txt;"
- "blkid -o list > /root/\$(hostname -f)/dump_blkid_o_list.txt;"
- "iptables -t nat -S > /root/\$(hostname -f)/dump_iptables_nat.txt;"
- "iptables -S > /root/\$(hostname -f)/dump_iptables.txt;"
- "ps auxwwf > /root/\$(hostname -f)/dump_ps.txt;"
- "docker images > /root/\$(hostname -f)/dump_docker_images.txt;"
- "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
- "docker service ls > "
- " /root/\$(hostname -f)/dump_docker_services_ls.txt;"
- "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
- " do docker service ps --no-trunc 2>&1 \$SERVICE >> "
- " /root/\$(hostname -f)/dump_docker_service_ps.txt;"
- " done;"
- "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
- " do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
- " /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
- " done;"
- "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
- "lvdisplay > /root/\$(hostname -f)/dump_lvdisplay.txt;"
- "ip a > /root/\$(hostname -f)/dump_ip_a.txt;"
- "ip r > /root/\$(hostname -f)/dump_ip_r.txt;"
- "netstat -anp > /root/\$(hostname -f)/dump_netstat.txt;"
- "brctl show > /root/\$(hostname -f)/dump_brctl_show.txt;"
- "arp -an > /root/\$(hostname -f)/dump_arp.txt;"
- "uname -a > /root/\$(hostname -f)/dump_uname_a.txt;"
- "lsmod > /root/\$(hostname -f)/dump_lsmod.txt;"
- "cat /proc/interrupts > /root/\$(hostname -f)/dump_interrupts.txt;"
- "cat /etc/*-release > /root/\$(hostname -f)/dump_release.txt;"
+ r"mkdir /root/\$(hostname -f)/;"
+ r"rsync -aruv /var/log/ /root/\$(hostname -f)/;"
+ r"dpkg -l > /root/\$(hostname -f)/dump_dpkg_l.txt;"
+ r"df -h > /root/\$(hostname -f)/dump_df.txt;"
+ r"mount > /root/\$(hostname -f)/dump_mount.txt;"
+ r"blkid -o list > /root/\$(hostname -f)/dump_blkid_o_list.txt;"
+ r"iptables -t nat -S > "
+ r" /root/\$(hostname -f)/dump_iptables_nat.txt;"
+ r"iptables -S > /root/\$(hostname -f)/dump_iptables.txt;"
+ r"ps auxwwf > /root/\$(hostname -f)/dump_ps.txt;"
+ r"docker images > /root/\$(hostname -f)/dump_docker_images.txt;"
+ r"docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
+ r"docker service ls > "
+ r" /root/\$(hostname -f)/dump_docker_services_ls.txt;"
+ r"for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ r" do docker service ps --no-trunc 2>&1 \$SERVICE >> "
+ r" /root/\$(hostname -f)/dump_docker_service_ps.txt;"
+ r" done;"
+ r"for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ r" do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
+ r" /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
+ r" done;"
+ r"vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
+ r"lvdisplay > /root/\$(hostname -f)/dump_lvdisplay.txt;"
+ r"ip a > /root/\$(hostname -f)/dump_ip_a.txt;"
+ r"ip r > /root/\$(hostname -f)/dump_ip_r.txt;"
+ r"netstat -anp > /root/\$(hostname -f)/dump_netstat.txt;"
+ r"brctl show > /root/\$(hostname -f)/dump_brctl_show.txt;"
+ r"arp -an > /root/\$(hostname -f)/dump_arp.txt;"
+ r"uname -a > /root/\$(hostname -f)/dump_uname_a.txt;"
+ r"lsmod > /root/\$(hostname -f)/dump_lsmod.txt;"
+ r"cat /proc/interrupts > "
+ r" /root/\$(hostname -f)/dump_interrupts.txt;"
+ r"cat /etc/*-release > /root/\$(hostname -f)/dump_release.txt;"
# OpenStack specific, will fail on other nodes
# "rabbitmqctl report > "
# " /root/\$(hostname -f)/dump_rabbitmqctl.txt;"
@@ -466,16 +468,22 @@
# " do echo Namespace: \${ns}; ip netns exec \${ns} netstat -anp;"
# "done > /root/\$(hostname -f)/dump_netstat_ns.txt;"
- "/usr/bin/haproxy-status.sh > "
- " /root/\$(hostname -f)/dump_haproxy.txt;"
+ r"/usr/bin/haproxy-status.sh > "
+ r" /root/\$(hostname -f)/dump_haproxy.txt;"
# Archive the files
- "cd /root/; tar --absolute-names --warning=no-file-changed "
- " -czf \$(hostname -f).tar.gz ./\$(hostname -f)/;"
+ r"cd /root/; tar --absolute-names --warning=no-file-changed "
+ r" -czf \$(hostname -f).tar.gz ./\$(hostname -f)/;"
)
master_host = self.__config.salt.salt_master_host
with self.remote(host=master_host) as master:
+ LOG.info("Make sure that 'rsync' is installed on all nodes")
+ master.check_call("salt '*' cmd.run "
+ " 'apt-get -qq install -y rsync'",
+ raise_on_err=False,
+ timeout=240)
+
# dump files
LOG.info("Archive artifacts on all nodes")
master.check_call('salt "*" cmd.run "{0}"'.format(dump_commands),
diff --git a/tcp_tests/report.py b/tcp_tests/report.py
index 0bec6ef..8a2a58e 100644
--- a/tcp_tests/report.py
+++ b/tcp_tests/report.py
@@ -188,7 +188,7 @@
LOG.info("Get results for run - {}".format(run.name))
results = t_client.results(run, result_type)
results_with_test = []
- if result_type is '5':
+ if result_type == '5':
ret = [(run, r) for r in results
if r.raw_data()['status_id'] is int(result_type) and
r.raw_data()['defects'] is None]
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index e8968be..83968e2 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -81,7 +81,7 @@
'docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest') # noqa
TEMPEST_IMAGE_VERSION = os.environ.get('TEMPEST_IMAGE_VERSION', 'pike')
TEMPEST_PATTERN = os.environ.get('TEMPEST_PATTERN', 'tempest')
-TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 6))
+TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 10))
TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
TEMPEST_EXTRA_ARGS = os.environ.get('TEMPEST_EXTRA_ARGS', '')
TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
index 212bf23..e053283 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -7,30 +7,38 @@
"MCP::Networks": fragments/Networks.yaml
"MCP::SingleInstance": fragments/Instance.yaml
"MCP::FoundationNode": fragments/FoundationNode.yaml
+ "MCP::VsrxNode": fragments/VsrxNode.yaml
parameter_defaults:
cfg_flavor: system.virtual.salt_master
- ctl_flavor: system.golden.openstack.control
- cid_flavor: system.golden.cicd.control
+ ctl_flavor: system.compact.openstack.control
+ cid_flavor: system.compact.cicd.control
ntw_flavor: system.compact.opencontrail.control
nal_flavor: system.compact.opencontrail.analytics
- dbs_flavor: system.golden.openstack.database
- msg_flavor: system.golden.openstack.message_queue
- mon_flavor: system.golden.stacklight.server
- log_flavor: system.golden.stacklight.log
- mtr_flavor: system.golden.stacklight.telemetry
+ dbs_flavor: system.compact.openstack.database
+ msg_flavor: system.compact.openstack.message_queue
+ mon_flavor: system.compact.stacklight.server
+ log_flavor: system.compact.stacklight.log
+ mtr_flavor: system.compact.stacklight.telemetry
cmp_flavor: system.virtual.openstack.compute
- cmn_flavor: system.golden.ceph.mon
- rgw_flavor: system.golden.ceph.rgw
+ cmn_flavor: system.compact.ceph.mon
+ rgw_flavor: system.compact.ceph.rgw
osd_flavor: system.virtual.openstack.compute
+ dns_flavor: system.compact.openstack.dns
+ kmn_flavor: system.compact.openstack.barbican
+ prx_flavor: system.compact.openstack.proxy
+ gtw_flavor: system.compact.openstack.gateway
kvm_fake_flavor: system.virtual.fake_kvm
foundation_flavor: system.virtual.foundation
+ vsrx_flavor: oc_vsrx
key_pair: system_key_8133
net_public: public
+ foundation_image: system.foundation
+
nameservers: 172.18.208.44
control_subnet_cidr: "10.6.0.0/24"
tenant_subnet_cidr: "10.8.0.0/24"
diff --git a/tcp_tests/templates/_heat_environments/fragments/Compute.yaml b/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
index 6b4c0c7..40ff833 100644
--- a/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
+++ b/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
@@ -19,12 +19,19 @@
type: string
control_net_static_ip:
type: string
+ tenant_net_static_ip:
+ type: string
+ external_net_static_ip:
+ type: string
underlay_userdata:
type: string
mcp_version:
type: string
env_name:
type: string
+ role:
+ type: comma_delimited_list
+ default: [salt_minion]
resources:
instance_port01:
@@ -44,6 +51,15 @@
properties:
port_security_enabled: false
network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: tenant_net_static_ip }
+ instance_port04:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: external_net_static_ip }
instance_instance:
type: OS::Nova::Server
@@ -60,6 +76,7 @@
- port: { get_resource: instance_port01 }
- port: { get_resource: instance_port02 }
- port: { get_resource: instance_port03 }
+ - port: { get_resource: instance_port04 }
block_device_mapping_v2:
- device_name: /dev/vdb
device_type: disk
@@ -79,21 +96,7 @@
$node_domain: { get_param: instance_domain }
$config_host: { get_param: instance_config_host }
metadata:
- roles:
- - salt_minion
-
- floating_ip:
- depends_on: [instance_instance]
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: net_public }
- port_id: { get_resource: instance_port01 }
- floating_ip_association:
- depends_on: [floating_ip]
- type: OS::Neutron::FloatingIPAssociation
- properties:
- floatingip_id: { get_resource: floating_ip }
- port_id: { get_resource: instance_port01 }
+ roles: { get_param: role }
outputs:
instance_address:
diff --git a/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml b/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
index 91f058a..5b2c2d4 100644
--- a/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
+++ b/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
@@ -7,6 +7,8 @@
type: string
instance_flavor:
type: string
+ instance_image:
+ type: string
instance_name:
type: string
instance_config_host:
@@ -19,12 +21,19 @@
type: string
control_net_static_ip:
type: string
+ tenant_net_static_ip:
+ type: string
+ external_net_static_ip:
+ type: string
underlay_userdata:
type: string
env_name:
type: string
mcp_version:
type: string
+ role:
+ type: comma_delimited_list
+ default: [foundation_jenkins_slave]
resources:
instance_port01:
@@ -44,18 +53,22 @@
properties:
port_security_enabled: false
network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: tenant_net_static_ip }
instance_port04:
type: OS::Neutron::Port
properties:
port_security_enabled: false
network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: external_net_static_ip }
instance_instance:
type: OS::Nova::Server
properties:
image_update_policy: REBUILD
flavor: { get_param: instance_flavor }
- image: { list_join: ['', [ 'ubuntu-16.04-foundation-', { get_param: mcp_version } ]] }
+ image: { get_param: instance_image }
key_name: { get_param: key_pair }
name:
list_join:
@@ -78,8 +91,7 @@
$node_domain: { get_param: instance_domain }
$config_host: { get_param: instance_config_host }
metadata:
- roles:
- - foundation_jenkins_slave
+ roles: { get_param: role }
floating_ip:
depends_on: [instance_instance]
diff --git a/tcp_tests/templates/_heat_environments/fragments/Instance.yaml b/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
index 1c9be45..5ead2ed 100644
--- a/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
+++ b/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
@@ -19,12 +19,19 @@
type: string
control_net_static_ip:
type: string
+ tenant_net_static_ip:
+ type: string
+ external_net_static_ip:
+ type: string
underlay_userdata:
type: string
mcp_version:
type: string
env_name:
type: string
+ role:
+ type: comma_delimited_list
+ default: [salt_minion]
resources:
instance_port01:
@@ -44,6 +51,15 @@
properties:
port_security_enabled: false
network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: tenant_net_static_ip }
+ instance_port04:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: external_net_static_ip }
instance_instance:
type: OS::Nova::Server
@@ -60,6 +76,7 @@
- port: { get_resource: instance_port01 }
- port: { get_resource: instance_port02 }
- port: { get_resource: instance_port03 }
+ - port: { get_resource: instance_port04 }
user_data_format: RAW
user_data:
str_replace:
@@ -72,21 +89,7 @@
$node_domain: { get_param: instance_domain }
$config_host: { get_param: instance_config_host }
metadata:
- roles:
- - salt_minion
-
- floating_ip:
- depends_on: [instance_instance]
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: net_public }
- port_id: { get_resource: instance_port01 }
- floating_ip_association:
- depends_on: [floating_ip]
- type: OS::Neutron::FloatingIPAssociation
- properties:
- floatingip_id: { get_resource: floating_ip }
- port_id: { get_resource: instance_port01 }
+ roles: { get_param: role }
outputs:
instance_address:
diff --git a/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml b/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
index 410deb6..0d85600 100644
--- a/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
+++ b/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
@@ -7,6 +7,10 @@
type: string
salt_master_control_ip:
type: string
+ tenant_net_static_ip:
+ type: string
+ external_net_static_ip:
+ type: string
network:
type: string
cfg01_flavor:
@@ -23,6 +27,9 @@
type: string
env_name:
type: string
+ role:
+ type: comma_delimited_list
+ default: [salt_master]
resources:
instance_port01:
@@ -41,6 +48,22 @@
fixed_ips:
- ip_address: { get_param: salt_master_control_ip }
+ instance_port03:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: tenant_net_static_ip }
+
+ instance_port04:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: external_net_static_ip }
+
instance_instance:
type: OS::Nova::Server
properties:
@@ -55,6 +78,8 @@
networks:
- port: { get_resource: instance_port01 }
- port: { get_resource: instance_port02 }
+ - port: { get_resource: instance_port03 }
+ - port: { get_resource: instance_port04 }
block_device_mapping_v2:
- device_name: /dev/cdrom
device_type: cdrom
@@ -63,8 +88,7 @@
image: { list_join: ['', [ 'cfg01.', { get_param: env_name }, '-config-drive.iso' ]] }
volume_size: 1
metadata:
- roles:
- - salt_master
+ roles: { get_param: role }
floating_ip:
depends_on: [instance_instance]
diff --git a/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml b/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
index 986b855..b7282d8 100644
--- a/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
+++ b/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
@@ -25,6 +25,27 @@
type: string
instance03_control_net_static_ip:
type: string
+ instance01_tenant_net_static_ip:
+ type: string
+ instance02_tenant_net_static_ip:
+ type: string
+ instance03_tenant_net_static_ip:
+ type: string
+ instance01_external_net_static_ip:
+ type: string
+ instance02_external_net_static_ip:
+ type: string
+ instance03_external_net_static_ip:
+ type: string
+ instance01_role:
+ type: comma_delimited_list
+ default: [salt_minion]
+ instance02_role:
+ type: comma_delimited_list
+ default: [salt_minion]
+ instance03_role:
+ type: comma_delimited_list
+ default: [salt_minion]
underlay_userdata:
type: string
mcp_version:
@@ -41,7 +62,10 @@
key_pair: { get_param: key_pair }
network: { get_param: network }
control_net_static_ip: {get_param: instance01_control_net_static_ip }
+ tenant_net_static_ip: {get_param: instance01_tenant_net_static_ip }
+ external_net_static_ip: {get_param: instance01_external_net_static_ip }
instance_name: { get_param: instance01_name }
+ role: { get_param: instance01_role }
instance_domain: { get_param: instance_domain }
instance_flavor: { get_param: instance_flavor }
instance_config_host: { get_param: instance_config_host }
@@ -55,7 +79,10 @@
key_pair: { get_param: key_pair }
network: { get_param: network }
control_net_static_ip: {get_param: instance02_control_net_static_ip }
+ tenant_net_static_ip: {get_param: instance02_tenant_net_static_ip }
+ external_net_static_ip: {get_param: instance02_external_net_static_ip }
instance_name: { get_param: instance02_name }
+ role: { get_param: instance02_role }
instance_domain: { get_param: instance_domain }
instance_flavor: { get_param: instance_flavor }
instance_config_host: { get_param: instance_config_host }
@@ -69,7 +96,10 @@
key_pair: { get_param: key_pair }
network: { get_param: network }
control_net_static_ip: {get_param: instance03_control_net_static_ip }
+ tenant_net_static_ip: {get_param: instance03_tenant_net_static_ip }
+ external_net_static_ip: {get_param: instance03_external_net_static_ip }
instance_name: { get_param: instance03_name }
+ role: { get_param: instance03_role }
instance_domain: { get_param: instance_domain }
instance_flavor: { get_param: instance_flavor }
instance_config_host: { get_param: instance_config_host }
diff --git a/tcp_tests/templates/_heat_environments/fragments/VsrxNode.yaml b/tcp_tests/templates/_heat_environments/fragments/VsrxNode.yaml
new file mode 100644
index 0000000..b3b32ef
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/VsrxNode.yaml
@@ -0,0 +1,96 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+ network:
+ type: string
+ instance_flavor:
+ type: string
+ instance_image:
+ type: string
+ instance_name:
+ type: string
+ instance_config_host:
+ type: string
+ key_pair:
+ type: string
+ instance_domain:
+ type: string
+ net_public:
+ type: string
+ control_net_static_ip:
+ type: string
+ tenant_net_static_ip:
+ type: string
+ external_net_static_ip:
+ type: string
+ # underlay_userdata:
+ # type: string
+ env_name:
+ type: string
+ mcp_version:
+ type: string
+
+resources:
+ instance_port01:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: control_net_static_ip }
+ instance_port02:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: tenant_net_static_ip }
+ instance_port03:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: external_net_static_ip }
+ instance_port04:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+
+ instance_instance:
+ type: OS::Nova::Server
+ properties:
+ image_update_policy: REBUILD
+ flavor: { get_param: instance_flavor }
+ image: { get_param: instance_image }
+ key_name: { get_param: key_pair }
+ name:
+ list_join:
+ - '.'
+ - [ { get_param: instance_name }, { get_param: env_name } ]
+ networks:
+ - port: { get_resource: instance_port01 }
+ - port: { get_resource: instance_port02 }
+ - port: { get_resource: instance_port03 }
+ - port: { get_resource: instance_port04 }
+ metadata:
+ roles:
+ - vsrx_node
+
+outputs:
+
+ instance_address:
+ value:
+ get_attr:
+ - instance_instance
+ - addresses
+ - 'management_net'
+ - 0
+ - addr
+ description: "Instance's private IP address"
+ instance:
+ value: { get_resource: instance_instance }
+ description: "Instance"
diff --git a/tcp_tests/templates/_packer/foundation/config-drive/meta-data b/tcp_tests/templates/_packer/foundation/config-drive/meta-data
new file mode 100644
index 0000000..b0c74c9
--- /dev/null
+++ b/tcp_tests/templates/_packer/foundation/config-drive/meta-data
@@ -0,0 +1 @@
+hostname: foundation
diff --git a/tcp_tests/templates/_packer/foundation/config-drive/user-data b/tcp_tests/templates/_packer/foundation/config-drive/user-data
new file mode 100644
index 0000000..1d68c57
--- /dev/null
+++ b/tcp_tests/templates/_packer/foundation/config-drive/user-data
@@ -0,0 +1,72 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ jenkins:qalab
+ expire: False
+
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
+
+ - path: /etc/sysctl.d/99-fuel-devops.conf
+ content: |
+ net.bridge.bridge-nf-call-arptables = 0
+ net.bridge.bridge-nf-call-ip6tables = 0
+ net.bridge.bridge-nf-call-iptables = 0
+
+ - path: /etc/ssh/ssh_config
+ content: |
+ Host *
+ SendEnv LANG LC_*
+ HashKnownHosts yes
+ GSSAPIAuthentication yes
+ GSSAPIDelegateCredentials no
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
+
+ - path: /etc/sudoers.d/99-mirantis
+ content: |
+ %mirantis ALL=(ALL) NOPASSWD:ALL
diff --git a/tcp_tests/templates/_packer/foundation/packer.json b/tcp_tests/templates/_packer/foundation/packer.json
new file mode 100644
index 0000000..452fdef
--- /dev/null
+++ b/tcp_tests/templates/_packer/foundation/packer.json
@@ -0,0 +1,64 @@
+{
+ "variables": {
+ "vm_name": "{{ env `IMAGE_NAME` }}.qcow2",
+ "image_path": "tmp/{{ env `IMAGE_NAME` }}",
+ "base_image_url": "{{ env `BASE_IMAGE_URL` }}",
+ "base_image_md5": "{{ env `BASE_IMAGE_MD5` }}",
+ "base_image_path": "base_image.qcow2",
+ "ssh_username": "root",
+ "ssh_password": "r00tme",
+ "ssh_wait_timeout": "30m",
+ "disk_size": "51200",
+ "boot_wait": "120s"
+ },
+
+ "builders":
+ [
+ {
+ "type": "qemu",
+ "qemuargs": [
+ [ "-m", "1024M" ],
+ [ "-cdrom", "tmp/config-drive.iso" ],
+ ["-device", "virtio-net,netdev=user.0"],
+ ["-object","rng-random,id=objrng0,filename=/dev/urandom"],
+ ["-device", "virtio-rng-pci,rng=objrng0,id=rng0,bus=pci.0,addr=0x10" ]
+ ],
+ "vm_name": "{{ user `vm_name` }}",
+ "output_directory": "{{ user `image_path` }}",
+ "format": "qcow2",
+ "iso_url": "{{ user `base_image_url` }}",
+ "iso_checksum": "{{ user `base_image_md5` }}",
+ "iso_checksum_type": "md5",
+ "iso_target_path": "{{ user `base_image_path`}}",
+ "disk_image": true,
+ "disk_compression": true,
+ "accelerator": "kvm",
+ "disk_size": "{{ user `disk_size`}}",
+ "headless": true,
+ "ssh_username": "{{ user `ssh_username` }}",
+ "ssh_password": "{{ user `ssh_password` }}",
+ "ssh_wait_timeout": "{{ user `ssh_wait_timeout` }}",
+ "ssh_host_port_min": 7000,
+ "ssh_host_port_max": 7050,
+ "shutdown_command": "shutdown -P now",
+ "boot_wait": "{{ user `boot_wait` }}"
+ }
+ ],
+
+ "provisioners": [
+ {
+ "type": "shell",
+ "environment_vars": [
+ "DEBIAN_FRONTEND=noninteractive"
+ ],
+ "execute_command": "echo '{{ user `ssh_password` }}' | {{.Vars}} sudo -S -E bash -x '{{.Path}}'",
+ "scripts": [
+ "tcp_tests/templates/_packer/scripts/ubuntu_packets.sh",
+ "tcp_tests/templates/_packer/scripts/ubuntu_ldap.sh",
+ "tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh",
+ "tcp_tests/templates/_packer/scripts/ubuntu_cleanup.sh",
+ "tcp_tests/templates/_packer/scripts/zerodisk.sh"
+ ]
+ }
+ ]
+}
diff --git a/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh b/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
new file mode 100644
index 0000000..eb83ab4
--- /dev/null
+++ b/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
@@ -0,0 +1,23 @@
+#!/bin/bash -xe
+
+DEVOPS_VENV_PATH=/home/jenkins/fuel-devops30
+REPORT_VENV_PATH=/home/jenkins/venv_testrail_reporter
+
+if [ ! -d ${DEVOPS_VENV_PATH} ]; then
+ virtualenv ${DEVOPS_VENV_PATH}
+fi
+if [ ! -d ${REPORT_VENV_PATH} ]; then
+ virtualenv ${REPORT_VENV_PATH}
+fi
+
+# Install tcp-qa requirements
+. ${DEVOPS_VENV_PATH}/bin/activate
+pip install -r https://raw.githubusercontent.com/Mirantis/tcp-qa/master/tcp_tests/requirements.txt
+pip install psycopg2 # workaround for setup with PostgreSQL, to keep requirements.txt for Sqlite3 only
+
+# Install xunit2testrail
+. ${REPORT_VENV_PATH}/bin/activate
+#pip install xunit2testrail -U
+pip install git+https://github.com/dis-xcom/testrail_reporter -U # Removed accessing to a nonexistent pastebin on srv62
+
+chown -R jenkins:jenkins /home/jenkins/
diff --git a/tcp_tests/templates/_packer/scripts/ubuntu_cleanup.sh b/tcp_tests/templates/_packer/scripts/ubuntu_cleanup.sh
new file mode 100644
index 0000000..63a7586
--- /dev/null
+++ b/tcp_tests/templates/_packer/scripts/ubuntu_cleanup.sh
@@ -0,0 +1,70 @@
+#!/bin/bash -xe
+
+apt-get -y remove --purge unattended-upgrades || true
+apt-get -y autoremove --purge
+apt-get -y clean
+
+rm -rf /var/lib/apt/lists/* || true
+rm -rv /etc/apt/sources.list.d/* || true
+rm -rv /etc/apt/preferences.d/* || true
+echo > /etc/apt/sources.list || true
+rm -vf /usr/sbin/policy-rc.d || true
+
+echo "cleaning up hostname"
+sed -i "/.*ubuntu.*/d" /etc/hosts
+sed -i "/.*salt.*/d" /etc/hosts
+
+echo "cleaning up guest additions"
+rm -rf VBoxGuestAdditions_*.iso VBoxGuestAdditions_*.iso.? || true
+
+echo "cleaning up dhcp leases"
+rm -rf /var/lib/dhcp/* || true
+rm -rfv /var/lib/ntp/ntp.conf.dhcp || true
+
+echo "cleaning up udev rules"
+rm -fv /etc/udev/rules.d/70-persistent-net.rules || true
+rm -rf /dev/.udev/ || true
+rm -fv /lib/udev/rules.d/75-persistent-net-generator.rules || true
+
+echo "cleaning up minion_id for salt"
+rm -vf /etc/salt/minion_id || true
+
+echo "cleaning up resolvconf"
+sed -i '/172\.18\.208\.44/d' /etc/resolvconf/resolv.conf.d/base
+
+echo "cleaning up /var/cache/{apt,salt}/*"
+rm -rf /var/cache/{apt,salt}/* || true
+
+rm -rf /root/.cache || true
+rm -rf /root/.ssh/known_hosts || true
+
+# Remove flags
+rm -v /done_ubuntu_base || true
+rm -v /done_ubuntu_salt_bootstrap || true
+
+# Force cleanup cloud-init data, if it was
+if [[ -d '/var/lib/cloud/' ]] ; then
+ rm -rf /var/lib/cloud/* || true
+ cloud-init clean || true
+ echo > /var/log/cloud-init-output.log || true
+ echo > /var/log/cloud-init.log || true
+fi
+
+cat << EOF > /etc/network/interfaces
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# Source interfaces
+# Please check /etc/network/interfaces.d before changing this file
+# as interfaces may have been defined in /etc/network/interfaces.d
+# See LP: #1262951
+source /etc/network/interfaces.d/*.cfg
+EOF
+
+# Clear\drop cache's
+sync
+echo 3 > /proc/sys/vm/drop_caches
diff --git a/tcp_tests/templates/_packer/scripts/ubuntu_ldap.sh b/tcp_tests/templates/_packer/scripts/ubuntu_ldap.sh
new file mode 100644
index 0000000..4c400fb
--- /dev/null
+++ b/tcp_tests/templates/_packer/scripts/ubuntu_ldap.sh
@@ -0,0 +1,56 @@
+#!/bin/bash -xe
+
+apt-get update
+apt-get install -y ldap-auth-client nscd ldap-utils
+
+auth-client-config -t nss -p lac_ldap
+
+sed -i 's$^#bind_policy hard$bind_policy soft$' /etc/ldap.conf
+sed -i 's$base dc=.*$base dc=mirantis,dc=net$' /etc/ldap.conf
+sed -i 's$uri ldap.*$uri ldap://ldap-bud.bud.mirantis.net/$' /etc/ldap.conf
+sed -i 's$^\(rootbinddn.*\)$#\1$' /etc/ldap.conf
+
+cat << 'EOF' >> /etc/ldap/ldap.conf
+BASE dc=mirantis,dc=net
+URI ldap://ldap-bud.bud.mirantis.net/
+EOF
+
+cat << 'EOF' > /usr/share/pam-configs/my_mkhomedir
+Name: activate mkhomedir
+Default: yes
+Priority: 900
+Session-Type: Additional
+Session:
+ required pam_mkhomedir.so umask=0022 skel=/etc/skel
+EOF
+
+cat << 'EOF' >> /etc/security/group.conf
+*;*;*;Al0000-2400;audio,cdrom,dialout,floppy,kvm,libvirtd
+EOF
+
+cat << 'EOF' > /usr/share/pam-configs/my_groups
+Name: activate /etc/security/group.conf
+Default: yes
+Priority: 900
+Auth-Type: Primary
+Auth:
+ required pam_group.so use_first_pass
+EOF
+
+cat << 'EOF' > /usr/local/sbin/ssh-ldap-keyauth
+#!/bin/bash
+
+/usr/bin/ldapsearch -x '(&(objectClass=posixAccount)(uid='"$1"'))' sshPublicKey | sed -n '/^ /{H;d};/sshPublicKey:/x;$g;s/\n *//g;s/sshPublicKey: //gp'
+EOF
+
+cat << 'EOF' >> /etc/ssh/sshd_config
+
+AuthorizedKeysCommand /usr/local/sbin/ssh-ldap-keyauth
+AuthorizedKeysCommandUser nobody
+EOF
+
+chmod +x /usr/local/sbin/ssh-ldap-keyauth
+DEBIAN_FRONTEND=noninteractive pam-auth-update
+
+#systemctl restart nscd.service;
+#systemctl restart sshd.service;
diff --git a/tcp_tests/templates/_packer/scripts/ubuntu_packets.sh b/tcp_tests/templates/_packer/scripts/ubuntu_packets.sh
new file mode 100644
index 0000000..883f620
--- /dev/null
+++ b/tcp_tests/templates/_packer/scripts/ubuntu_packets.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -xe
+
+apt-get update
+
+# for Jenkins agent
+apt-get install -y openjdk-8-jre-headless
+# for fuel-devops and tcp-qa
+apt-get install -y libyaml-dev libffi-dev libvirt-dev python-dev pkg-config vlan bridge-utils python-pip python3-pip virtualenv
+# additional tools
+apt-get install -y ebtables curl ethtool iputils-ping lsof strace tcpdump traceroute wget iptables htop \
+ git jq ntpdate tree mc byobu at pm-utils genisoimage iotop
+
+# ldap
+apt-get install -y ldap-auth-client nscd ldap-utils
+
+# update kernel
+apt-get install -y linux-generic-hwe-16.04
diff --git a/tcp_tests/templates/_packer/scripts/zerodisk.sh b/tcp_tests/templates/_packer/scripts/zerodisk.sh
new file mode 100644
index 0000000..159ae13
--- /dev/null
+++ b/tcp_tests/templates/_packer/scripts/zerodisk.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -x
+
+dd if=/dev/zero of=/EMPTY bs=1M || true
+rm -f /EMPTY
+
+sync
+echo 3 > /proc/sys/vm/drop_caches
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 09690cf..b9c826d 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -77,6 +77,7 @@
control_network_subnet: 10.167.11.0/24
control_vlan: '2404'
cookiecutter_template_branch: proposed
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.164.1
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 1d269c3..4625136 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -19,6 +19,7 @@
control_network_subnet: 10.167.11.0/24
control_vlan: '2404'
cookiecutter_template_branch: proposed
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.164.1
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
deleted file mode 100644
index fc35f88..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml
deleted file mode 100644
index 69a9df0..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-nodes:
- cfg01.cookied-bm-contrail-maas.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.cookied-bm-contrail-maas.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-contrail-maas.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-contrail-maas.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp5s0f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.101
- enp5s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.101
-
- cmp002.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp5s0f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.102
- enp5s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml b/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml
deleted file mode 100644
index ed0191c..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml
+++ /dev/null
@@ -1,111 +0,0 @@
-classes:
-- system.linux.system.repo.mcp.apt_mirantis.maas
-- system.maas.region.single
-parameters:
- _param:
- maas_region_port: 5240
- maas_cluster_region_port: ${_param:maas_region_port}
- power_user: mcp-qa
- power_password: password
- maas:
- cluster:
- region:
- host: ${_param:deploy_address}:${_param:maas_cluster_region_port}
- region:
- bind:
- host: ${_param:deploy_address}:${_param:maas_region_port}
- subnets:
- 172.16.49.64/26:
- cidr: 172.16.49.64/26
- fabric: fabric-51
- gateway_ip: 172.16.49.65
- iprange:
- end: 172.16.49.119
- start: 172.16.49.77
- fabrics:
- fabric-51:
- description: Fabric for deploy
- #commissioning_scripts:
- # 00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
- machines:
- kvm01: # cz7341-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:33:1f:e4"
- interfaces:
- one1:
- mac: "0c:c4:7a:33:1f:e4"
- mode: "static"
- ip: ${_param:infra_kvm_node01_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.161"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- kvm02: # #cz7342-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:33:20:fc"
- interfaces:
- one1:
- mac: "0c:c4:7a:33:20:fc"
- mode: "static"
- ip: ${_param:infra_kvm_node02_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.162"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- kvm03: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:31:fb:b6"
- interfaces:
- one1:
- mac: "0c:c4:7a:31:fb:b6"
- mode: "static"
- ip: ${_param:infra_kvm_node03_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.163"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- cmp001: # #cz7345-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:31:f0:12"
- interfaces:
- one1:
- mac: "0c:c4:7a:31:f0:12"
- mode: "static"
- ip: ${_param:infra_kvm_node04_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.17"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- cmp002: # cz7346-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:31:ef:bc"
- interfaces:
- one1:
- mac: "0c:c4:7a:31:ef:bc"
- mode: "static"
- ip: ${_param:infra_kvm_node05_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.18"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
deleted file mode 100644
index 65cd68b..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail db on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail control on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' service.restart 'keepalived'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail client on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Test Opencontrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' cmd.run 'contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on ctl
- cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack vrouter (Delete default moun point)
- cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Temporary WR for correct pci in vrouter.conf
- cmd: salt "cmp*" cmd.run "sed -i 's/physical\_interface\_address\=.*/physical\_interface\_address=0000\:05\:00\.0/g' /etc/contrail/contrail-vrouter-agent.conf"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Reboot computes
- cmd: salt --timeout=600 "cmp*" system.reboot
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml b/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 02cf300..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,202 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEA1Hbf7nJ3VGyRxqwPNqnDcspyuJXf0WwuEJJyDxATV0JTuZSz
- jcT4A1XLN/WG8diN0Q5/tYRpuSxNKaz3nPxXjTqK1byrB7jGBhNQeWYHMNoBH3VR
- Kzm4yBGIKaG6k5wYB5kS950zxNHIKQ7Mo4+0WloIlgZTkMrQo98FaD1X9jyVJm7O
- TfIqTwYSJb4TJf/hgL8xntpGC6gRjZDQzlxB25bSP2+u3KzfUyDiaK8hs5PWwnNn
- KfvZmYZmsgA4+D+dQh4YkyfDN6hQL4ttW+2SRpZ/js+KTkqU1Vyn5P5cbf2RwH0G
- VewWyYA2ZK7nXPYH7+ia1rPj6wO3UXEFnxVzowIDAQABAoIBAQDCgenURFrWoWZ7
- ub1bz+MszgZk0mDLOvyZE1j0TUVHl2NK/MW8vlxHwV2AZ3kZI6YBhAKKzSR08Afc
- ZYty3tnQY44CyuzQ7unrWfdMjIl8wbhRcnfS2M8/6jz70CIdTMP7ALqKkhJ4140l
- eXUDMGZuaQp/Pl92qRaFT4GFwtMqivQobCX5/yehd3+mMu8CkK+1U0T/9gWacEdv
- JISAkfpOGkXLmZ/ekkqNFfv6SrNaefaYjqMeGk3ZrFmPAEstqxo0tiUjV5BYXCME
- SsJCF/EnDHxMYzwkqCCGrXY0DbXGqF1B+dO8MqeYTIYyohEJ82vkezP5BqM4L/3Z
- Lec3ypPhAoGBAPh0eBKNgLOhMz+h1DnjdTJAcsa58ZQCH6r+pTVIhwmbCT/3RTJT
- oizYVhtlQyi0lHsxMwtYx+oj1Qacs7jb6UH24d6+oX0JZr/lfxqZVvgmTOoNED3l
- ZdX3xU4GOMhWXS7IEhk750LFGF1k7QLcoULx3u/8dZNgW0kNZUsptkxpAoGBANrq
- m9CFnbzSm20EicfT8FX7Hu6Wl3Lgwnsgc+7+dkm8uST+DdDWjUymkTjeIq/8Z/va
- I+rhURp+r7tLFAy+sC6YeLZ7oHcUIfUwXOdlIJOpdVayF5oiZ5qCSkNcT2oJfFdJ
- Uf3/gwQDxg6CKJIKdnyK1njnfldfoFLZz9Z1Ze4rAoGBAMhfde7Qe/liiihJZRUC
- oiPS4j3u/Ct3wv5uu+JLCczvYfhafU3nMSWlm1wgwJb1e8IWnaoLAb+NAmKAwljV
- 0jrG1saDS03B5UHh3i4feIpMqT8hJfYlKYn0dwVD80tui1wNMrtzGkE5HztDB/qE
- 4PFSi49UNaaT0UsLKKQDkefxAoGAf91+iwowOuTsoX2AGHajLyVRSNwus4uyLIal
- EJgScTlJDuFRIoTe3UGBGy0sJ4yPE9yzE/LtE0Oh0wykNll+wIiQIU4OSN86gmLw
- MLuxjm3xOmUlQgMMboPhanzVacMGnFkYCfqfBM5LdZfyqHJyCIZzhQT5l4EkPKA6
- NDI4CicCgYAtaC63kRjv8NNWo1iuovOLF4pdnYEviT37s24zlf18IKLZTW66AGbX
- 2lLHBiS7SyLEvIpn8/Vwh185CbTitlfRpU5bPzwk4dvKPgUa0eiov/Voe+WPWafQ
- +uErJ3mt7l+dThL1q70aD6Dl1pbMjG5xbIKSmXNrmrrMVN8+pM2BJQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUdt/ucndUbJHGrA82qcNyynK4ld/RbC4QknIPEBNXQlO5lLONxPgDVcs39Ybx2I3RDn+1hGm5LE0prPec/FeNOorVvKsHuMYGE1B5Zgcw2gEfdVErObjIEYgpobqTnBgHmRL3nTPE0cgpDsyjj7RaWgiWBlOQytCj3wVoPVf2PJUmbs5N8ipPBhIlvhMl/+GAvzGe2kYLqBGNkNDOXEHbltI/b67crN9TIOJoryGzk9bCc2cp+9mZhmayADj4P51CHhiTJ8M3qFAvi21b7ZJGln+Oz4pOSpTVXKfk/lxt/ZHAfQZV7BbJgDZkrudc9gfv6JrWs+PrA7dRcQWfFXOj
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-bm-contrail-maas.local
- cluster_name: cookied-bm-contrail-maas
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: ADkYzQUNzQcfbBH3McX2MLVK7EkgliUBuVRvaw6e4pFcRtbkawgc9FTHFaw1L5Eh
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-51
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: 981k0rv2KevPhpyyy3BgZK8cNZWUjifx
- salt_api_password_hash: $6$kpjwqhVv$gUQV0XYxXNUu3ESKSmE1s.eDAaYunerIsF3DdzjvMqCRiH7DdOWuun/pdjSVp.jjKHYsb0GimXyUh6sX/77PM/
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml
deleted file mode 100644
index c4346b6..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-
- ctl01.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-contrail-maas.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-contrail-maas.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal02.cookied-bm-contrail-maas.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal03.cookied-bm-contrail-maas.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw01.cookied-bm-contrail-maas.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw02.cookied-bm-contrail-maas.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw03.cookied-bm-contrail-maas.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- mtr01.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.cookied-bm-contrail-maas.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-contrail-maas.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
deleted file mode 100644
index 51dfc5d..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
+++ /dev/null
@@ -1,234 +0,0 @@
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import REPOSITORY_SUITE with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import ETH1_IP_ADDRESS_CFG01 with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: Upload maas template
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: maas.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "WR for changing image to proposed"
- cmd: |
- set -e;
- # Add message_queu host for opencontrail
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster//{{ LAB_CONFIG_NAME }}infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster//{{ LAB_CONFIG_NAME }}infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update minion information
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure rsyslog on nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure maas.cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.cluster
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure maas.region
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.region
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Configure dhcp for fabric
- cmd: |
- touch /root/API_KEY_FILE;
- export PROFILE=mirantis;
- export API_KEY_FILE=/root/API_KEY_FILE;
- export MAAS_URL=http://{{ ETH1_IP_ADDRESS_CFG01 }}:5240/MAAS;
- maas-region apikey --username=$PROFILE > $API_KEY_FILE;
- maas login $PROFILE $MAAS_URL - < $API_KEY_FILE;
- maas $PROFILE ipranges create type=dynamic start_ip={{ MAAS_DHCP_POOL_START }} end_ip={{ MAAS_DHCP_POOL_END }} comment='Reserved dynamic range for HW and VCP nodes'
- maas $PROFILE ipranges create type=reserved start_ip={{ ETH1_IP_ADDRESS_CFG01 }} end_ip={{ ETH1_IP_ADDRESS_CFG01 }} comment='This is a reserved IP for cfg with maas node';
- maas $PROFILE vlan update 51 0 dhcp_on=True primary_rack=cfg01;
- maas $PROFILE nodes read |grep status -A 1 -B 1;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Comissioning nodes
- cmd: |
- salt-call state.apply maas.machines;
- salt-call state.apply maas.machines.wait_for_ready;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Assign IPs
- cmd: |
- salt-call state.sls maas.machines.assign_ip;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Deploying BM nodes
- cmd: |
- salt-call maas.deploy_machines || true
- salt-call state.apply maas.machines.wait_for_deployed;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure ntp on all nodes
- cmd: |
- salt '*' state.sls ntp;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Syncing before salt control state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml
deleted file mode 100644
index 38a8dcd..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml
+++ /dev/null
@@ -1,261 +0,0 @@
-{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-#Launch containers
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 6617855..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- - sudo route add default gw {gateway} ens3
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - export MAAS_PXE_INTERFACE_NAME=ens4
- - export MAAS_PXE_INTERFACE_ADDRESS={{ os_env('MAAS_PXE_INTERFACE_ADDRESS', '172.16.49.66') }}
- - export MAAS_DHCP_POOL_NETMASK={{ os_env('MAAS_DHCP_POOL_NETMASK', '255.255.255.192') }}
- - export MAAS_DHCP_POOL_NETMASK_PREFIX={{ os_env('MAAS_DHCP_POOL_NETMASK_PREFIX', '26') }}
- - export MAAS_DHCP_POOL_START={{ os_env('MAAS_DHCP_POOL_START', '172.16.49.77') }}
- - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
- - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
deleted file mode 100644
index 4e7082a..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-bm-contrail-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-
-{% set MAAS_DHCP_POOL_START = os_env('MAAS_DHCP_POOL_START', '172.16.49.77') %}
-{% set MAAS_DHCP_POOL_END = os_env('MAAS_DHCP_POOL_END', '172.16.49.119') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('MAAS_ADMIN_ADDRESS_POOL01', '10.50.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +2
- ip_ranges:
- dhcp: [+90, -10]
-
- provisioning-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +61
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: +2
- virtual_{{ HOSTNAME_CFG01 }}: +2
- ip_ranges:
- dhcp: [+12, +55]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- provisioning: provisioning-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- provisioning:
- address_pool: provisioning-pool01
- dhcp: false
- forward:
- mode: !os_env MAAS_PXE_IFACE_MODE, bridge
- parent_iface:
- phys_dev: !os_env MAAS_PXE_IFACE
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 180
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- - label: ens4
- l2_network_device: provisioning
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - provisioning
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
deleted file mode 100644
index 64f01fa..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/lab04-physical-inventory.yaml
deleted file mode 100644
index 3c5401c..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-nodes:
- cfg01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_vlan_ctl
- single_address: 10.167.8.101
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp5s0f0:
- role: single_contrail_dpdk_prv
- tenant_address: 192.168.0.101
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '90:e2:ba:19:c2:18'
-
- cmp002.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_vlan_ctl
- single_address: 10.167.8.102
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp5s0f0:
- role: single_contrail_dpdk_prv
- tenant_address: 192.168.0.102
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/maas.yml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/maas.yml
deleted file mode 100644
index ed0191c..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/maas.yml
+++ /dev/null
@@ -1,111 +0,0 @@
-classes:
-- system.linux.system.repo.mcp.apt_mirantis.maas
-- system.maas.region.single
-parameters:
- _param:
- maas_region_port: 5240
- maas_cluster_region_port: ${_param:maas_region_port}
- power_user: mcp-qa
- power_password: password
- maas:
- cluster:
- region:
- host: ${_param:deploy_address}:${_param:maas_cluster_region_port}
- region:
- bind:
- host: ${_param:deploy_address}:${_param:maas_region_port}
- subnets:
- 172.16.49.64/26:
- cidr: 172.16.49.64/26
- fabric: fabric-51
- gateway_ip: 172.16.49.65
- iprange:
- end: 172.16.49.119
- start: 172.16.49.77
- fabrics:
- fabric-51:
- description: Fabric for deploy
- #commissioning_scripts:
- # 00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
- machines:
- kvm01: # cz7341-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:33:1f:e4"
- interfaces:
- one1:
- mac: "0c:c4:7a:33:1f:e4"
- mode: "static"
- ip: ${_param:infra_kvm_node01_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.161"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- kvm02: # #cz7342-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:33:20:fc"
- interfaces:
- one1:
- mac: "0c:c4:7a:33:20:fc"
- mode: "static"
- ip: ${_param:infra_kvm_node02_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.162"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- kvm03: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:31:fb:b6"
- interfaces:
- one1:
- mac: "0c:c4:7a:31:fb:b6"
- mode: "static"
- ip: ${_param:infra_kvm_node03_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.163"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- cmp001: # #cz7345-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:31:f0:12"
- interfaces:
- one1:
- mac: "0c:c4:7a:31:f0:12"
- mode: "static"
- ip: ${_param:infra_kvm_node04_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.17"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
- cmp002: # cz7346-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:31:ef:bc"
- interfaces:
- one1:
- mac: "0c:c4:7a:31:ef:bc"
- mode: "static"
- ip: ${_param:infra_kvm_node05_deploy_address}
- subnet: "10.10.0.0/16" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.18"
- power_password: ${_param:power_password}
- power_type: ipmi
- power_user: ${_param:power_user}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml
deleted file mode 100644
index 6e9e2c2..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/openstack.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail db on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail control on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' service.restart 'keepalived'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail client on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Test Opencontrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' cmd.run 'contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on ctl
- cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack vrouter (Delete default moun point)
- cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Reboot computes
- cmd: salt --timeout=600 "cmp*" system.reboot
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/overrides-policy.yml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 0b00542..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,207 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEA1Hbf7nJ3VGyRxqwPNqnDcspyuJXf0WwuEJJyDxATV0JTuZSz
- jcT4A1XLN/WG8diN0Q5/tYRpuSxNKaz3nPxXjTqK1byrB7jGBhNQeWYHMNoBH3VR
- Kzm4yBGIKaG6k5wYB5kS950zxNHIKQ7Mo4+0WloIlgZTkMrQo98FaD1X9jyVJm7O
- TfIqTwYSJb4TJf/hgL8xntpGC6gRjZDQzlxB25bSP2+u3KzfUyDiaK8hs5PWwnNn
- KfvZmYZmsgA4+D+dQh4YkyfDN6hQL4ttW+2SRpZ/js+KTkqU1Vyn5P5cbf2RwH0G
- VewWyYA2ZK7nXPYH7+ia1rPj6wO3UXEFnxVzowIDAQABAoIBAQDCgenURFrWoWZ7
- ub1bz+MszgZk0mDLOvyZE1j0TUVHl2NK/MW8vlxHwV2AZ3kZI6YBhAKKzSR08Afc
- ZYty3tnQY44CyuzQ7unrWfdMjIl8wbhRcnfS2M8/6jz70CIdTMP7ALqKkhJ4140l
- eXUDMGZuaQp/Pl92qRaFT4GFwtMqivQobCX5/yehd3+mMu8CkK+1U0T/9gWacEdv
- JISAkfpOGkXLmZ/ekkqNFfv6SrNaefaYjqMeGk3ZrFmPAEstqxo0tiUjV5BYXCME
- SsJCF/EnDHxMYzwkqCCGrXY0DbXGqF1B+dO8MqeYTIYyohEJ82vkezP5BqM4L/3Z
- Lec3ypPhAoGBAPh0eBKNgLOhMz+h1DnjdTJAcsa58ZQCH6r+pTVIhwmbCT/3RTJT
- oizYVhtlQyi0lHsxMwtYx+oj1Qacs7jb6UH24d6+oX0JZr/lfxqZVvgmTOoNED3l
- ZdX3xU4GOMhWXS7IEhk750LFGF1k7QLcoULx3u/8dZNgW0kNZUsptkxpAoGBANrq
- m9CFnbzSm20EicfT8FX7Hu6Wl3Lgwnsgc+7+dkm8uST+DdDWjUymkTjeIq/8Z/va
- I+rhURp+r7tLFAy+sC6YeLZ7oHcUIfUwXOdlIJOpdVayF5oiZ5qCSkNcT2oJfFdJ
- Uf3/gwQDxg6CKJIKdnyK1njnfldfoFLZz9Z1Ze4rAoGBAMhfde7Qe/liiihJZRUC
- oiPS4j3u/Ct3wv5uu+JLCczvYfhafU3nMSWlm1wgwJb1e8IWnaoLAb+NAmKAwljV
- 0jrG1saDS03B5UHh3i4feIpMqT8hJfYlKYn0dwVD80tui1wNMrtzGkE5HztDB/qE
- 4PFSi49UNaaT0UsLKKQDkefxAoGAf91+iwowOuTsoX2AGHajLyVRSNwus4uyLIal
- EJgScTlJDuFRIoTe3UGBGy0sJ4yPE9yzE/LtE0Oh0wykNll+wIiQIU4OSN86gmLw
- MLuxjm3xOmUlQgMMboPhanzVacMGnFkYCfqfBM5LdZfyqHJyCIZzhQT5l4EkPKA6
- NDI4CicCgYAtaC63kRjv8NNWo1iuovOLF4pdnYEviT37s24zlf18IKLZTW66AGbX
- 2lLHBiS7SyLEvIpn8/Vwh185CbTitlfRpU5bPzwk4dvKPgUa0eiov/Voe+WPWafQ
- +uErJ3mt7l+dThL1q70aD6Dl1pbMjG5xbIKSmXNrmrrMVN8+pM2BJQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUdt/ucndUbJHGrA82qcNyynK4ld/RbC4QknIPEBNXQlO5lLONxPgDVcs39Ybx2I3RDn+1hGm5LE0prPec/FeNOorVvKsHuMYGE1B5Zgcw2gEfdVErObjIEYgpobqTnBgHmRL3nTPE0cgpDsyjj7RaWgiWBlOQytCj3wVoPVf2PJUmbs5N8ipPBhIlvhMl/+GAvzGe2kYLqBGNkNDOXEHbltI/b67crN9TIOJoryGzk9bCc2cp+9mZhmayADj4P51CHhiTJ8M3qFAvi21b7ZJGln+Oz4pOSpTVXKfk/lxt/ZHAfQZV7BbJgDZkrudc9gfv6JrWs+PrA7dRcQWfFXOj
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-bm-contrail-nfv-maas.local
- cluster_name: cookied-bm-contrail-nfv-maas
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: Rp34QM5SR94AJrAB3RpbDX4Xlqrq6bd119BLjC6TC6e22mfh8fFvFV7HtBk2hq3e
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 8.8.4.4
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-51
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_nova_cpu_pinning: 1,2,7,8
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: LXqVfP7rDvPTrJ3Auwnt2nzhX2vBf0Ua
- salt_api_password_hash: $6$BMhMUqcE$bxEWV1ZlP.8XlTyEMA7ceNjygMY4pylL4SdboiMlIOvni6i3gTxdxB7rGEUHLfKz6iSzYe5VDY71xi5cKdjpS.
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-environment.yaml
deleted file mode 100644
index 368d1f7..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-
- ctl01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- mtr01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.cookied-bm-contrail-nfv-maas.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-contrail-nfv-maas.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
deleted file mode 100644
index 31e9736..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
+++ /dev/null
@@ -1,244 +0,0 @@
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import REPOSITORY_SUITE with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import ETH1_IP_ADDRESS_CFG01 with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-nfv-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: Upload maas template
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: maas.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "WR for changing image to proposed"
- cmd: |
- set -e;
- # Add message_queu host for opencontrail
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster//{{ LAB_CONFIG_NAME }}infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster//{{ LAB_CONFIG_NAME }}infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-- description: "WR for dpdk pci to be in correct quotes"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml;
- reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update minion information
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure rsyslog on nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure maas.cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.cluster
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure maas.region
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.region
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Configure dhcp for fabric
- cmd: |
- touch /root/API_KEY_FILE;
- export PROFILE=mirantis;
- export API_KEY_FILE=/root/API_KEY_FILE;
- export MAAS_URL=http://{{ ETH1_IP_ADDRESS_CFG01 }}:5240/MAAS;
- maas-region apikey --username=$PROFILE > $API_KEY_FILE;
- maas login $PROFILE $MAAS_URL - < $API_KEY_FILE;
- maas $PROFILE ipranges create type=dynamic start_ip={{ MAAS_DHCP_POOL_START }} end_ip={{ MAAS_DHCP_POOL_END }} comment='Reserved dynamic range for HW and VCP nodes'
- maas $PROFILE ipranges create type=reserved start_ip={{ ETH1_IP_ADDRESS_CFG01 }} end_ip={{ ETH1_IP_ADDRESS_CFG01 }} comment='This is a reserved IP for cfg with maas node';
- maas $PROFILE vlan update 51 0 dhcp_on=True primary_rack=cfg01;
- maas $PROFILE nodes read |grep status -A 1 -B 1;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Comissioning nodes
- cmd: |
- salt-call state.apply maas.machines;
- salt-call state.apply maas.machines.wait_for_ready;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Assign IPs
- cmd: |
- salt-call state.sls maas.machines.assign_ip;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Deploying BM nodes
- cmd: |
- salt-call maas.deploy_machines || true
- salt-call state.apply maas.machines.wait_for_deployed;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure ntp on all nodes
- cmd: |
- salt '*' state.sls ntp;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Syncing before salt control state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/sl.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/sl.yaml
deleted file mode 100644
index 3435917..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/sl.yaml
+++ /dev/null
@@ -1,261 +0,0 @@
-{% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-#Launch containers
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 6617855..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- - sudo route add default gw {gateway} ens3
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - export MAAS_PXE_INTERFACE_NAME=ens4
- - export MAAS_PXE_INTERFACE_ADDRESS={{ os_env('MAAS_PXE_INTERFACE_ADDRESS', '172.16.49.66') }}
- - export MAAS_DHCP_POOL_NETMASK={{ os_env('MAAS_DHCP_POOL_NETMASK', '255.255.255.192') }}
- - export MAAS_DHCP_POOL_NETMASK_PREFIX={{ os_env('MAAS_DHCP_POOL_NETMASK_PREFIX', '26') }}
- - export MAAS_DHCP_POOL_START={{ os_env('MAAS_DHCP_POOL_START', '172.16.49.77') }}
- - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
- - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml
deleted file mode 100644
index 0b689aa..0000000
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-bm-contrail-nfv-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail-nfv-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-
-{% set MAAS_DHCP_POOL_START = os_env('MAAS_DHCP_POOL_START', '172.16.49.77') %}
-{% set MAAS_DHCP_POOL_END = os_env('MAAS_DHCP_POOL_END', '172.16.49.119') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail-nfv-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('MAAS_ADMIN_ADDRESS_POOL01', '10.50.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +2
- ip_ranges:
- dhcp: [+90, -10]
-
- provisioning-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +61
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: +2
- virtual_{{ HOSTNAME_CFG01 }}: +2
- ip_ranges:
- dhcp: [+12, +55]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- provisioning: provisioning-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- provisioning:
- address_pool: provisioning-pool01
- dhcp: false
- forward:
- mode: !os_env MAAS_PXE_IFACE_MODE, bridge
- parent_iface:
- phys_dev: !os_env MAAS_PXE_IFACE
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 180
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- - label: ens4
- l2_network_device: provisioning
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - provisioning
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
deleted file mode 100644
index ac23ec1..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
deleted file mode 100644
index 736a356..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-nodes:
- cfg01.cookied-bm-contrail40-nfv.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.cookied-bm-contrail40-nfv.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-contrail40-nfv.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-contrail40-nfv.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.cookied-bm-contrail40-nfv.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute_dpdk
- - openstack_compute_sriov
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp5s0f0:
- role: single_contrail_dpdk_vlan_prv
- tenant_address: 192.168.0.101
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '90:e2:ba:19:c2:18'
- enp2s0f0:
- role: single_vlan_ctl
- single_address: 10.167.8.101
-
- cmp002.cookied-bm-contrail40-nfv.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute_dpdk
- - openstack_compute_sriov
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp5s0f0:
- role: single_contrail_dpdk_vlan_prv
- tenant_address: 192.168.0.102
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '00:1b:21:87:21:98'
- enp2s0f0:
- role: single_vlan_ctl
- single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
deleted file mode 100644
index 4eae932..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
+++ /dev/null
@@ -1,325 +0,0 @@
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
-
-- description: WR Install cinder volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:volume' state.sls cinder
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-
-- description: Install Docker services
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
- fi; sleep 10;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install opencontrail database services on first minion
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install opencontrail database services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control services on first minion
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail collectors on first minion
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail collectors
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Spawn Opencontrail docker images
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Finalize opencontrail services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 30}
- skip_fail: false
-
-- description: Finalize opencontrail services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Finalize opencontrail services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 30}
- skip_fail: true
-
-- description: Check contrail status
- cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Reboot computes
- cmd: |
- salt "cmp*" system.reboot;
- sleep 600;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Apply Opencontrail compute
- cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 30}
- skip_fail: false
-
-- description: Apply Opencontrail compute
- cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Check status for contrail services
- cmd: |
- sleep 15;
- salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create heat-net before external net create
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create heat-net'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create public network for contrail
- cmd: |
- salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create heat-router'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set heat-router public'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Fix default security group for access to external net from outside
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-# Starting prepare runtest
-
-- description: Upload tempest template
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: runtest.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Include class with tempest template into cfg node
- cmd: |
- sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
- salt '*' saltutil.refresh_pillar;
- salt '*' saltutil.sync_all;
- salt 'ctl01*' pkg.install docker.io;
- salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
- salt 'cfg01*' state.sls salt.minion && sleep 20;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Enforce keystone client
- cmd: |
- salt 'cfg01*' state.sls keystone.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Steps from nova client for dpdk
- cmd: |
- . /root/keystonercv3; nova flavor-create m1.extra_tiny_test 998 1024 5 1;
- nova flavor-create m1.tiny_test 999 1024 5 1;
- nova flavor-key m1.extra_tiny_test set hw:mem_page_size=1GB;
- nova flavor-key m1.tiny_test set hw:mem_page_size=1GB;
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Upload cirros image
- cmd: |
- salt 'cfg01*' state.sls glance.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Generate tempest config
- cmd: |
- salt 'cfg01*' state.sls runtest;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download cirros image for runtest
- cmd: |
- wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Test future contrail manipulation
- cmd: |
- apt install crudini jq -y;
- crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
- crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
- crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
- cat /tmp/test/tempest.conf;
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Run tempest from new docker image
- cmd: |
- OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
- docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Test Wait container script
- cmd: |
- report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
- if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
- then echo "All done!"; docker logs run-tempest-yml;
- elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
- then echo "Exit without report!"; docker logs run-tempest-yml;
- else echo "Tempest not finished... ";sleep 900; false;
- fi
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 25, delay: 30}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /tmp/test/
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_CTL01 }}
- skip_fail: true
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/runtest.yml b/tcp_tests/templates/cookied-bm-contrail40-nfv/runtest.yml
deleted file mode 100644
index f0d6d8a..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/runtest.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-classes:
-- service.runtest.tempest
-- service.runtest.tempest.public_net
-- service.runtest.tempest.services.manila.glance
-parameters:
- _param:
- glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
- glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
- glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
- openstack_public_neutron_subnet_allocation_end: 192.168.200.220
- openstack_public_neutron_subnet_allocation_start: 192.168.200.130
- openstack_public_neutron_subnet_cidr: 192.168.200.0/24
- openstack_public_neutron_subnet_gateway: 192.168.200.1
- runtest_tempest_cfg_dir: /tmp/test
- runtest_tempest_cfg_name: tempest.conf
- runtest_tempest_public_net: public
- tempest_test_target: ctl01*
- neutron:
- client:
- enabled: true
- runtest:
- enabled: true
- keystonerc_node: ctl01*
- tempest:
- DEFAULT:
- log_file: tempest.log
- cfg_dir: ${_param:runtest_tempest_cfg_dir}
- cfg_name: ${_param:runtest_tempest_cfg_name}
- compute:
- min_compute_nodes: 2
- convert_to_uuid:
- network:
- public_network_id: ${_param:runtest_tempest_public_net}
- enabled: true
- heat_plugin:
- build_timeout: '600'
- put_keystone_rc_enabled: false
- put_local_image_file_enabled: false
- share:
- capability_snapshot_support: true
- run_driver_assisted_migration_tests: false
- run_manage_unmanage_snapshot_tests: false
- run_manage_unmanage_tests: false
- run_migration_with_preserve_snapshots_tests: false
- run_quota_tests: true
- run_replication_tests: false
- run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
deleted file mode 100644
index 7825f00..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
+++ /dev/null
@@ -1,253 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- opencontrail_compute_iface: enp5s0f0.${_param:tenant_vlan}
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
-
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-bm-4.0-contrail.local
- cluster_name: cookied-bm-4.0-contrail
- opencontrail_version: 4.0
- linux_repo_contrail_component: oc40
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-51
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-bm-4.0-contrail.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 8372b67..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,253 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- opencontrail_compute_iface: enp5s0f0.${_param:tenant_vlan}
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
-
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-bm-4.0-contrail.local
- cluster_name: cookied-bm-4.0-contrail
- opencontrail_version: 4.0
- linux_repo_contrail_component: oc40
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-51
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-bm-4.0-contrail.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-environment.yaml
deleted file mode 100644
index d43b66a..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-environment.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
- cid01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
deleted file mode 100644
index 594f46d..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40-nfv') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/nodes.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Update minion information
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "WR for dpdk pci to be in correct quotes"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml;
- reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: WR for mounting 1G hugepages before linux.state
- cmd: |
- salt 'cmp*' state.sls linux.system.hugepages;
- salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
- salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Lab04 workaround: Give each node root acces with key from cfg01"
- cmd: |
- set -e;
- set -x;
- key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
- salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
- cmd: |
- set -e;
- set -x;
- KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
- apt-get install -y sshuttle;
- sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
deleted file mode 100644
index 5810c71..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--meta-data.yaml
deleted file mode 100644
index a594a53..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
deleted file mode 100644
index cc69c64..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index ba69177..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
deleted file mode 100644
index bdcd21d..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
deleted file mode 100644
index ed36fad..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
+++ /dev/null
@@ -1,571 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail40-nfv') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
-{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
-# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
-{% import 'cookied-bm-contrail40-nfv/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-contrail40-nfv/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail40-nfv_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
- # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
- # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- #ip_ranges:
- # dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
-
- groups:
-
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- #ens4:
- # networks:
- # - private
-
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
-
- # - name: {{ HOSTNAME_CFG01 }}
- # role: salt_master
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_cfg01
-
- # interfaces:
- # - label: enp3s0f0 # Infra interface
- # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- # - label: enp3s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- # network_config:
- # enp3s0f0:
- # networks:
- # - infra
- # enp3s0f1:
- # networks:
- # - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- # - label: eno1
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- # - label: eno2
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- # eno1:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- # - name: {{ HOSTNAME_KVM04 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_KVM04 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
- #
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # # cloud_init_iface_up: eno1 # see 'interfaces' below.
- # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
- #
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
- #
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- #
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data
- #
- # interfaces:
- # # - label: eno1
- # - label: enp2s0f0
- # l2_network_device: admin
- # mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
- # # - label: eno2
- # - label: enp2s0f1
- # mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
- #
- # network_config:
- # # eno1:
- # enp2s0f0:
- # networks:
- # - admin
- # bond0:
- # networks:
- # - control
- # aggregation: active-backup
- # parents:
- # - enp2s0f1
- #
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: enp5s0f2
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- # - label: eno1
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- # - label: eth0
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- # - label: eth3
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- # - label: eth2
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: eth4
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
- # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
-
- # - name: {{ HOSTNAME_CMP003 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CMP003 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
- #
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # # cloud_init_iface_up: eno1 # see 'interfaces' below.
- # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
- #
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
- #
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- #
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_hwe
- #
- # interfaces:
- # # - label: eno1
- # - label: enp2s0f1
- # mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
- # # - label: eth0
- # - label: enp2s0f0
- # l2_network_device: admin
- # mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
- #
- # network_config:
- # enp2s0f0:
- # networks:
- # - admin
diff --git a/tcp_tests/templates/cookied-bm-contrail40/core.yaml b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
deleted file mode 100644
index 21ab849..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/core.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
deleted file mode 100644
index 5cb47fb..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-nodes:
- cfg01.cookied-bm-contrail40.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.cookied-bm-contrail40.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-contrail40.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-contrail40.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.cookied-bm-contrail40.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp5s0f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.101
- enp5s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.101
-
- cmp002.cookied-bm-contrail40.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp5s0f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.102
- enp5s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
deleted file mode 100644
index e9f7c0b..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
+++ /dev/null
@@ -1,322 +0,0 @@
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
-
-- description: WR Install cinder volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:volume' state.sls cinder
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-
-- description: Install Docker services
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
- fi; sleep 10;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install opencontrail database services on first minion
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install opencontrail database services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control services on first minion
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail collectors on first minion
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail collectors
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 20}
- skip_fail: false
-
-- description: Spawn Opencontrail docker images
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Finalize opencontrail services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 30}
- skip_fail: false
-
-- description: Finalize opencontrail services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Finalize opencontrail services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 30}
- skip_fail: true
-
-- description: Check contrail status
- cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Reboot computes
- cmd: |
- salt "cmp*" system.reboot;
- sleep 600;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Apply Opencontrail compute
- cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 30}
- skip_fail: false
-
-- description: Apply Opencontrail compute
- cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Check status for contrail services
- cmd: |
- sleep 15;
- salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create heat-net before external net create
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create heat-net'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create public network for contrail
- cmd: |
- salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create heat-router'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set heat-router public'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Fix default security group for access to external net from outside
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-# Starting prepare runtest
-
-- description: Upload tempest template
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: runtest.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Include class with tempest template into cfg node
- cmd: |
- sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
- salt '*' saltutil.refresh_pillar;
- salt '*' saltutil.sync_all;
- salt 'ctl01*' pkg.install docker.io;
- salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
- salt 'cfg01*' state.sls salt.minion && sleep 20;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Enforce keystone client
- cmd: |
- salt 'cfg01*' state.sls keystone.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create flavors for tests
- cmd: |
- salt 'cfg01*' state.sls nova.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Upload cirros image
- cmd: |
- salt 'cfg01*' state.sls glance.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Generate tempest config
- cmd: |
- salt 'cfg01*' state.sls runtest;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download cirros image for runtest
- cmd: |
- wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Test future contrail manipulation
- cmd: |
- apt install crudini jq -y;
- crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
- crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
- crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
- cat /tmp/test/tempest.conf;
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Run tempest from new docker image
- cmd: |
- OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
- docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Test Wait container script
- cmd: |
- report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
- if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
- then echo "All done!"; docker logs run-tempest-yml;
- elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
- then echo "Exit without report!"; docker logs run-tempest-yml;
- else echo "Tempest not finished... ";sleep 900; false;
- fi
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 25, delay: 30}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /tmp/test/
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_CTL01 }}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40/runtest.yml b/tcp_tests/templates/cookied-bm-contrail40/runtest.yml
deleted file mode 100644
index f0d6d8a..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/runtest.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-classes:
-- service.runtest.tempest
-- service.runtest.tempest.public_net
-- service.runtest.tempest.services.manila.glance
-parameters:
- _param:
- glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
- glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
- glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
- openstack_public_neutron_subnet_allocation_end: 192.168.200.220
- openstack_public_neutron_subnet_allocation_start: 192.168.200.130
- openstack_public_neutron_subnet_cidr: 192.168.200.0/24
- openstack_public_neutron_subnet_gateway: 192.168.200.1
- runtest_tempest_cfg_dir: /tmp/test
- runtest_tempest_cfg_name: tempest.conf
- runtest_tempest_public_net: public
- tempest_test_target: ctl01*
- neutron:
- client:
- enabled: true
- runtest:
- enabled: true
- keystonerc_node: ctl01*
- tempest:
- DEFAULT:
- log_file: tempest.log
- cfg_dir: ${_param:runtest_tempest_cfg_dir}
- cfg_name: ${_param:runtest_tempest_cfg_name}
- compute:
- min_compute_nodes: 2
- convert_to_uuid:
- network:
- public_network_id: ${_param:runtest_tempest_public_net}
- enabled: true
- heat_plugin:
- build_timeout: '600'
- put_keystone_rc_enabled: false
- put_local_image_file_enabled: false
- share:
- capability_snapshot_support: true
- run_driver_assisted_migration_tests: false
- run_manage_unmanage_snapshot_tests: false
- run_manage_unmanage_tests: false
- run_migration_with_preserve_snapshots_tests: false
- run_quota_tests: true
- run_replication_tests: false
- run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
deleted file mode 100644
index e7dcb16..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-bm-4.0-contrail.local
- cluster_name: cookied-bm-4.0-contrail
- opencontrail_version: 4.0
- linux_repo_contrail_component: oc40
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-51
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: prometheus
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-bm-4.0-contrail.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 50eabbe..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-bm-4.0-contrail.local
- cluster_name: cookied-bm-4.0-contrail
- opencontrail_version: 4.0
- linux_repo_contrail_component: oc40
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-51
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: prometheus
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-bm-4.0-contrail.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-environment.yaml
deleted file mode 100644
index d43b66a..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-environment.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
- cid01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
deleted file mode 100644
index aa4f4d7..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/nodes.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Change path to internal storage for salt.control images"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Update minion information
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Lab04 workaround: Give each node root acces with key from cfg01"
- cmd: |
- set -e;
- set -x;
- key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
- salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
- cmd: |
- set -e;
- set -x;
- KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
- apt-get install -y sshuttle;
- sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
deleted file mode 100644
index a5495cd..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--meta-data.yaml
deleted file mode 100644
index a594a53..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 6b6ec9f..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index ba69177..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml
deleted file mode 100644
index bdcd21d..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml
deleted file mode 100644
index ba82288..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay.yaml
+++ /dev/null
@@ -1,571 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail40') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
-{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
-# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
-{% import 'cookied-bm-contrail40/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-contrail40/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-contrail40/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-contrail40/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail40_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
- # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
- # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- #ip_ranges:
- # dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
-
- groups:
-
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- #ens4:
- # networks:
- # - private
-
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
-
- # - name: {{ HOSTNAME_CFG01 }}
- # role: salt_master
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_cfg01
-
- # interfaces:
- # - label: enp3s0f0 # Infra interface
- # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- # - label: enp3s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- # network_config:
- # enp3s0f0:
- # networks:
- # - infra
- # enp3s0f1:
- # networks:
- # - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- # - label: eno1
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- # - label: eno2
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- # eno1:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- # - name: {{ HOSTNAME_KVM04 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_KVM04 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
- #
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # # cloud_init_iface_up: eno1 # see 'interfaces' below.
- # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
- #
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
- #
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- #
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data
- #
- # interfaces:
- # # - label: eno1
- # - label: enp2s0f0
- # l2_network_device: admin
- # mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
- # # - label: eno2
- # - label: enp2s0f1
- # mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
- #
- # network_config:
- # # eno1:
- # enp2s0f0:
- # networks:
- # - admin
- # bond0:
- # networks:
- # - control
- # aggregation: active-backup
- # parents:
- # - enp2s0f1
- #
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: enp5s0f2
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- # - label: eno1
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- # - label: eth0
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- # - label: eth3
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- # - label: eth2
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: eth4
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
- # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
-
- # - name: {{ HOSTNAME_CMP003 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CMP003 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
- #
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # # cloud_init_iface_up: eno1 # see 'interfaces' below.
- # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
- #
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
- #
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- #
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_hwe
- #
- # interfaces:
- # # - label: eno1
- # - label: enp2s0f1
- # mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
- # # - label: eth0
- # - label: enp2s0f0
- # l2_network_device: admin
- # mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
- #
- # network_config:
- # enp2s0f0:
- # networks:
- # - admin
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
deleted file mode 100644
index 55d6d8d..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml
deleted file mode 100644
index fd07061..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: "Give each node root access with key from cfg01"
- cmd: |
- set -e;
- set -x;
- key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
- salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: temp WR
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ifdown br-prv; ifup br-prv'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.128/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.130,end=172.17.42.180 --gateway 172.17.42.129'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 192.168.1.0/24 --name net04__subnet --allocation-pool start=192.168.1.150,end=192.168.1.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
deleted file mode 100644
index ce13598..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ /dev/null
@@ -1,206 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEogIBAAKCAQEAvDqzt/PHWvSSJrBtvD3UWp21CDrAajgOPeXrVm7VU+sDDpw0
- YqDvVhYfT/N6ha+SWOK00KyTuMMbB8/I4tvsP9vvCXy7v2AJID/ZO2z/t8KfTDEJ
- C75/6a0UBg6sl3i7+cUOHbeK+lkcfdnSI1H8Jzdhk4Xj7t7q+MIKTs5n+AlScgyz
- NSiD2nG/U5LmaQ+rjZ1VsF9J0YTds0yLDF3hztVoDTs7j5obl7Xab3ZlwalgH5Gc
- Id6BI09jkUbppdPzHpzV2oad7cFpVYTt9m3/MMT0amzPuwl/u/dI64rRuWPe60eT
- qeVMQD0zP6o9F79upbzQStt82lPJcfF4CXvxYwIDAQABAoIBAAHUXDzUfMKQj/8a
- RebHfxHmaIUM9SPTKahGXNQ5PY+UQDJbKFMxF0Jx8pn3VuCHxVdh1LoWg1UPaGra
- BSzXUGOKgrdH5BdHGq+aj0T5mT6zAJNgAqN/lYSy7vfkGp9aSBF0bd+yEgK+7Pz4
- Kge320iSTDt/2KhQuF30nN8JOI97m2uk2YHH8TixfVtmgLPEy+0Mw4VZLsHD4OY1
- zu8xN6co2aQR0DB0MPKD6IxH62wSOJKBzF4o5xzzy/fl0ysDZbZ8Z/5Rejvp3yNT
- 68B0X5CM27hVdYE+/tcKGl9WKmewIf3fTZUfBcwFIObMIl9fkK/519nwFed4AfOX
- /a2LCBECgYEA9Lyl/eyzXuU2dgs6Gw/WMobqOVnHF9wbukS1XSKdwMogtpt7Pb23
- +32r9xHgeRDvvWwSp8lNPZ8mu77dQ6akbOuOk5C6V3Mqt4zam3DBDMtL63nKq8tq
- LQ0PVjj8cAgu3GSDCz7htqUb44rn5tX9zlM0vrwxzyYqbet7ZbsGoYsCgYEAxORQ
- JFn1vwag8VBw3bngx3SJ46CpCC8Gz830W7pEaTS6zTTiDC4p5sATGya91JS8l47G
- ikP2bcWzvT6aP/u+TZSqZiqp5Kn37fx8Et+ltIl47SH+PJHIR9F9r2f9zqla6mlP
- zcX/mTSuAJCTP4whQA3/f1wNAeBnewhK9fXCOokCgYAz6TPYSXW+giXsIfOAO/q2
- GvHhmk5lnDVxbBOAHtCRTQ5lTVN1xCEbQgvQy0TuyQ3hAuRuHH+6u2BO4Gw0Zkto
- IwrJ+B/eXrpH1qOj5uW73f9Lgjjf+bSau7NuGYZKCSJPcy5smzjrMdhZimQoDWnZ
- csK0VlzGUJUdXZ599I6ygwKBgGTf+LN3J7H0Snb4WKsw9Zoa+h6WjKO1vE6xXVW1
- rCEes+o5Autsp2ki1WcexTlp7unTa6MhSNta5Ei8Dzli2FBVL6xihWKzNmRG7Kaa
- 0QIbQMp1lRUhN7Sb/0HkDKRaHktlI07w95Bd7hw59kcjm1F/Gnz9A2kHuNzPFeDI
- RffJAoGAdeCID5sb0oHEHTIxxB+cgfaiyaAe9qrW2INNWLVn5OTDh6cidatnWAor
- M/SxwNoiYcCpi869q7wzjw5gNOVoNJbmwzDA7s+lgjTPQpq2jmO6RtweKbYoN5Zw
- ++LiD3r07TD3p2QAyeooT29D/d6/2Hd6oyTJcZWIQTN+MTcXQO4=
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8OrO388da9JImsG28PdRanbUIOsBqOA495etWbtVT6wMOnDRioO9WFh9P83qFr5JY4rTQrJO4wxsHz8ji2+w/2+8JfLu/YAkgP9k7bP+3wp9MMQkLvn/prRQGDqyXeLv5xQ4dt4r6WRx92dIjUfwnN2GThePu3ur4wgpOzmf4CVJyDLM1KIPacb9TkuZpD6uNnVWwX0nRhN2zTIsMXeHO1WgNOzuPmhuXtdpvdmXBqWAfkZwh3oEjT2ORRuml0/MenNXahp3twWlVhO32bf8wxPRqbM+7CX+790jritG5Y97rR5Op5UxAPTM/qj0Xv26lvNBK23zaU8lx8XgJe/Fj
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.11.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.11.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.11.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.11.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAshiE2vK11KH1/PHO9v5IcT1ol3kuAorv6IgW+1paT9w4pFnd
- H2DHQxTJsZ629cig+ELVAKHQnkND2U++/DM20ai5ZfpOwlvd+dL95/FbGb62Ozxx
- kxBjyc/Bbbs8LcZtS1SN+agdkjQG1StpckUbFppoJ9nzWgnEcdYdonQ6aThgd+YL
- rAOX04s3cMlCflClQl3lGFo24Qdhk/Y4M5rodfqfD5NOSKEhYP/dTMunri8zB5bU
- ifvOvCWUKUOxLjkx95raY82xMHUobMYk87RcLPcq8pyz96/FPoiLqxM1oznTKNiI
- 0bW0xjf7FFjfLCjTapKZPRz8+Wkvzmzj35LLrwIDAQABAoIBADJoGCo0Kdy93nay
- JgboX66VV+YPaUNU+aQR6JdJsmgKB4oU2S4JYTyORKveJSCZoV3C5LCiG/6/QRPf
- q0mMYUaj/51qZCJEiCYuXqjoOgWmYcOQTwD10ZiMEc4yAU1fbQ22J9zyhTQdP5XU
- DKtH/eu+1h35ZRQl0ZD6rjaNuP6QekJM6IVCC7XBaCG5+wSER9R25HbbDhdb7CwH
- W1GP9IgISqy9Z3f4PQOyCUmn/O99lN8kry6ui7bCywRfITV6C+pn02DpMgzKZ8jn
- 3yts1f2mIbYVxnahtCaI3QTag6wBsnFq+U0uIXrUGMeeRzg9N1Ur01qdJpIR9g0v
- Nt7QUZkCgYEA4lEavsFitSll/33JY4q82nYpjXAxTON3xraUqNYn5Cde06gNi8n1
- t9TCLUqDhSpvgEOyJE/uwo5LAj79Ce2EwLkCttNggqRXBlY5ZpljwfWmxZtuGm/z
- BJaOtkaK/1diR/+Qn/fTMyPH5JIXuQ6/XF60W4MSbzPgY4GO1BDx+G0CgYEAyXRT
- 00GDdiXbxQmzeHTO9Bg5y36Y1FEWDLnc89bpHPTR4sT/XCczerevy/l8jsdzZlnu
- 5ZddfWMF7EGNo51Zbmi0oLQ7nzigoVFcnhFHRFoCP36T9mvJk7O8Ao3ttpl/J2r0
- mFiaKi0lhmZVbNpmliKjWAMZJyt6I7AfYekcOwsCgYEA0W8MuQptNgkhgtX80ElL
- iz9eJK12chjuds3vtG66a8CjWGtkXcB/y6bwKsmR/GHQ3XnIGSJ/aTwU3fc8YzuS
- ZmbPxDDIVx2OCycv52p7jrqtoqC7u2tuEQji+Hs/lhxfrxEp3V+R6vlpunQX0AF9
- xRU/ApDBNndjZ7I1YrprseECgYA+zx8HgaiMIJeZokGrb7fKkPcMBCeAItveEeDa
- wYmito3txv/a6nn5a+XKkbmNBpBrO+To/j1ux33kQDf56Cgm7UxLwoXISa6DPUvE
- GJ0AqZOD2mIldUu+2k3m+ftAcDEdyBIEobNHLRZDBgriSmGrs5b77NNdzAdjsxjF
- vRlJKwKBgD8DcP/C9pABC2mRQyH//RTk6XZfiDY0L18lwH7acEdHlJiF1PTwvIHD
- cj1nMyG2MxEiSt1E5O/YQ4Lo3sognFIb8keu7IYxEgLXhvWFR3RwaYCjrF4ZGfD2
- +83eUFPZQvEwTY/8OCogzJQfs1CT8+pLdO9tZQbrAaxfmF6c48KN
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
- cluster_domain: cookied-bm-dpdk-pipeline.local
- cluster_name: cookied-bm-dpdk-pipeline
- compute_bond_mode: balance-slb
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.11.0/24
- control_vlan: '2416'
- cookiecutter_template_branch: proposed
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.62
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.0/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: obutenko@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.11.241
- infra_kvm01_deploy_address: 172.16.49.11
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.11.242
- infra_kvm02_deploy_address: 172.16.49.12
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.11.243
- infra_kvm03_deploy_address: 172.16.49.13
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.11.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.15
- maas_hostname: cfg01
- mcp_version: testing
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: deploy-name.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openstack_benchmark_node01_address: 10.167.11.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 10.167.11.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.11.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.11.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.11.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.11.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.11.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.11.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.11.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.11.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.12.6
- openstack_gateway_node02_address: 10.167.11.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.12.7
- openstack_gateway_node03_address: 10.167.11.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.12.8
- openstack_message_queue_address: 10.167.11.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.11.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.11.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.11.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nfv_sriov_network: physnet2
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vlan
- openstack_ovs_encapsulation_vlan_range: 2418:2420
- openstack_proxy_address: 10.167.11.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.11.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.11.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.11.19
- openstack_version: pike
- cinder_version: ${_param:openstack_version}
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
- salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
- salt_master_address: 10.167.11.2
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.2
- shared_reclass_branch: proposed
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.12.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.12.0/24
- tenant_vlan: '2417'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- sriov_network_subnet: 192.168.10.0/24
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
deleted file mode 100644
index 7c436d4..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-nodes:
- cfg01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_static_ctl
-
- # Physical nodes
- kvm01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- - openstack_compute_sriov
- interfaces:
- enp5s0f0:
- role: combined_vlan_ctl_mgm
- single_address: 10.167.11.105
- enp3s0f0:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.0"
- tenant_address: 10.167.12.105
- enp3s0f1:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.1"
- # Remove this interface after switching to reclass 1.5.x
- # in favor to system.nova.compute.nfv.sriov
- enp5s0f1:
- role: sriov
-
- cmp02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- - openstack_compute_sriov
- interfaces:
- enp5s0f0:
- role: combined_vlan_ctl_mgm
- single_address: 10.167.11.106
- enp3s0f0:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.0"
- tenant_address: 10.167.12.106
- enp3s0f1:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.1"
- # Remove this interface after switching to reclass 1.5.x
- # in favor to system.nova.compute.nfv.sriov
- enp5s0f1:
- role: sriov
-
- gtw01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- enp3s0f0:
- role: single_mgm
- deploy_address: 172.16.49.5
- enp3s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-
- gtw02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- deploy_address: 172.16.49.4
- enp3s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml
deleted file mode 100644
index cec7902..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-nodes:
- ctl01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
-# mtr01.cookied-bm-dpdk-pipeline.local:
-# reclass_storage_name: stacklight_telemetry_node01
-# roles:
-# - stacklight_telemetry
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
-# mtr02.cookied-bm-dpdk-pipeline.local:
-# reclass_storage_name: stacklight_telemetry_node02
-# roles:
-# - stacklight_telemetry
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
-# mtr03.cookied-bm-dpdk-pipeline.local:
-# reclass_storage_name: stacklight_telemetry_node03
-# roles:
-# - stacklight_telemetry
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
- cid01.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-dpdk-pipeline.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
deleted file mode 100644
index 459ab69..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
+++ /dev/null
@@ -1,146 +0,0 @@
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: "Workaround for PROD-22201 (remove after switching to reclass 1.5.x) - Remove linux.network.interface object from the system models and use fixed 'environment' model instead"
- cmd: |
- set -e;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "WR for changing VCP images path to internal storage"
- cmd: |
- set -e;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary workaround for removing virtual gtw nodes
- cmd: |
- sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: WR for mounting 1G hugepages before linux.state
- cmd: |
- salt 'cmp*' state.sls linux.system.hugepages;
- salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
- salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: WR for correct acces to git repo from jenkins on cfg01 node
- cmd: |
- git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
- git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
- chown -R git:www-data /home/repo/mk/mk-pipelines/*;
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Add cpm nodes to /etc/hosts
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.105 cmp01.cookied-bm-dpdk-pipeline.local cmp01' >> /etc/hosts";
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.106 cmp02.cookied-bm-dpdk-pipeline.local cmp02' >> /etc/hosts";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Temporary WR
- cmd: |
- ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Enable Jenkins
- cmd: |
- systemctl enable jenkins || true;
- systemctl restart jenkins || true;
- sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: run jenkins.client
- cmd: |
- salt-call state.sls jenkins.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
deleted file mode 100644
index b77550a..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 44ae1f5..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
deleted file mode 100644
index b39b37a..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
deleted file mode 100644
index 53c19d1..0000000
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
+++ /dev/null
@@ -1,495 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-dpdk-pipeline') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
-{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.11.253') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
-{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.49.3') %}
-{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.49.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
-{% import 'cookied-bm-dpdk-pipeline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
- default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
- default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- ip_ranges:
- dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
- params:
- ip_reserved:
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.12.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
- params:
- ip_reserved:
- gateway: '172.17.42.129'
- ip_ranges:
- dhcp: ['172.17.42.130', '172.17.42.180']
-
-
- groups:
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
- private:
- parent_iface:
- phys_dev: !os_env CONTROL_IFACE
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp5s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
- - label: enp5s0f0
- l2_network_device: admin
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP01
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP01
- network_config:
- enp5s0f0:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp5s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
- - label: enp5s0f0
- l2_network_device: admin
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP02
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP02
- network_config:
- enp5s0f0:
- networks:
- - admin
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_GTW02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
deleted file mode 100644
index 14e7c37..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
deleted file mode 100644
index a3640c8..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-nodes:
- cfg01.contrail-nfv.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.contrail-nfv.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.contrail-nfv.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.contrail-nfv.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.contrail-nfv.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_vlan_ctl
- single_address: 10.167.8.101
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp5s0f0:
- role: single_contrail_dpdk_vlan_prv
- tenant_address: 192.168.0.101
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '90:e2:ba:19:c2:18'
-
- cmp002.contrail-nfv.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_vlan_ctl
- single_address: 10.167.8.102
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp5s0f0:
- role: single_contrail_dpdk_vlan_prv
- tenant_address: 192.168.0.102
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
deleted file mode 100644
index 875aace..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
+++ /dev/null
@@ -1,340 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
-
-- description: WR Install cinder volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:volume' state.sls cinder
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail db on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail control on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' service.restart 'keepalived'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail client on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-# The next four steps should be converted to one (state.sls opencontrail) with skip_fail: false
-# It is related to bug with hugepages. So we need to add WR, then reboot
-# for only 1G hugepages were mounted. Then re-apply state
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Hack vrouter (Delete default moun point)
- cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Reboot computes
- cmd: salt --timeout=600 "cmp*" system.reboot
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Test Opencontrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' cmd.run 'contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install compute node
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Reboot computes
- cmd: |
- salt "cmp*" system.reboot;
- sleep 600;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create heat-net before external net create
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create heat-net'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create public network for contrail
- cmd: |
- salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create heat-router'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set heat-router public'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Fix default security group for access to external net from outside
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-# Starting prepare runtest
-
-- description: Upload tempest template
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: runtest.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Include class with tempest template into cfg node
- cmd: |
- sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
- salt '*' saltutil.refresh_pillar;
- salt '*' saltutil.sync_all;
- salt 'ctl01*' pkg.install docker.io;
- salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
- salt 'cfg01*' state.sls salt.minion && sleep 20;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Enforce keystone client
- cmd: |
- salt 'cfg01*' state.sls keystone.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Steps from nova client for dpdk
- cmd: |
- . /root/keystonercv3; nova flavor-create m1.extra_tiny_test 998 1024 5 1;
- nova flavor-create m1.tiny_test 999 1024 5 1;
- nova flavor-key m1.extra_tiny_test set hw:mem_page_size=1GB;
- nova flavor-key m1.tiny_test set hw:mem_page_size=1GB;
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Upload cirros image
- cmd: |
- salt 'cfg01*' state.sls glance.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Generate tempest config
- cmd: |
- salt 'cfg01*' state.sls runtest;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download cirros image for runtest
- cmd: |
- wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Test future contrail manipulation
- cmd: |
- apt install crudini jq -y;
- crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
- crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
- crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
- cat /tmp/test/tempest.conf;
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Run tempest from new docker image
- cmd: |
- OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
- docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Test Wait container script
- cmd: |
- report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
- if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
- then echo "All done!"; docker logs run-tempest-yml;
- elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
- then echo "Exit without report!"; docker logs run-tempest-yml;
- else echo "Tempest not finished... ";sleep 900; false;
- fi
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 25, delay: 30}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /tmp/test/
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_CTL01 }}
- skip_fail: true
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/runtest.yml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/runtest.yml
deleted file mode 100644
index f0d6d8a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/runtest.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-classes:
-- service.runtest.tempest
-- service.runtest.tempest.public_net
-- service.runtest.tempest.services.manila.glance
-parameters:
- _param:
- glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
- glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
- glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
- openstack_public_neutron_subnet_allocation_end: 192.168.200.220
- openstack_public_neutron_subnet_allocation_start: 192.168.200.130
- openstack_public_neutron_subnet_cidr: 192.168.200.0/24
- openstack_public_neutron_subnet_gateway: 192.168.200.1
- runtest_tempest_cfg_dir: /tmp/test
- runtest_tempest_cfg_name: tempest.conf
- runtest_tempest_public_net: public
- tempest_test_target: ctl01*
- neutron:
- client:
- enabled: true
- runtest:
- enabled: true
- keystonerc_node: ctl01*
- tempest:
- DEFAULT:
- log_file: tempest.log
- cfg_dir: ${_param:runtest_tempest_cfg_dir}
- cfg_name: ${_param:runtest_tempest_cfg_name}
- compute:
- min_compute_nodes: 2
- convert_to_uuid:
- network:
- public_network_id: ${_param:runtest_tempest_public_net}
- enabled: true
- heat_plugin:
- build_timeout: '600'
- put_keystone_rc_enabled: false
- put_local_image_file_enabled: false
- share:
- capability_snapshot_support: true
- run_driver_assisted_migration_tests: false
- run_manage_unmanage_snapshot_tests: false
- run_manage_unmanage_tests: false
- run_migration_with_preserve_snapshots_tests: false
- run_quota_tests: true
- run_replication_tests: false
- run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
deleted file mode 100644
index bfd683a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
+++ /dev/null
@@ -1,203 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEA1Ne+K9yyvgc8Z0QO2qt5eacOQmtbwmChnptlG0e+JdIorz5l
- ++AiduF5KrPt7rlx5Ow8XHiBUqcrAwFiVGzGYOv0YJhBgZzpwKPcSAP8jo56d7ZB
- RNMZAB3kgjODvWa7DIp6M5cXZ2FmCeFW72HbaZd3B0lAUiqm9si9gZBkircaYdbu
- DqeAngHiNVbWjxHh9hZ8cWidk++98GYH5HbLJ7L9l+2rFGyW1EwtiMb6SyipDAdu
- szEuemU3coJebArzm3Is6lmatcVTe5M6DTgK7IP7cd2DqqYa5tYDN55SwAd93wUc
- oyCbL5DmAGNcCMbFE/CGI0UYBrx7XXatgskj9wIDAQABAoIBAGv43fImTU51IUFJ
- lzd60W7TPjqXd78Ngi+RqSLDrERHbngn8VhrBVDFZNAy4rq9vHdjF+PZBdHGF924
- cAdf/urgB+KQmnqD/VjKR08JJq+yu5HLFSUy6XXTtD4Qn/4PBUiBXyiYtzisgjT0
- 6UMao1HXknxRvp1wIa8Deo8ljruG5n/1ZX7x7eqSud5xgKz4MlXVwxgoA373R0mg
- m8S2p7b3wS2vhWpf3oU3Y47q1Gdd5aOYblqkw3yvkUvBoSW9iwoNc5vLB6hHHs36
- gMO5bANLdhbrf2eRULUsbRwuewiHhu5GaWSF0/FKcf1V7OoBcaO767ZEpnhMtFag
- Rb9TJtECgYEA87ojKcwmbUUBqu4MPNqc5kIYMn3yEV/q+8uMM8cK79iumG9htrcF
- E8WhwZBBj7BUzO57LmsrADVu1FMpHFTq1kVpg7flldz59EcaUYL0dvGbRyygrVNO
- 7/rxWPZfaXgxJhyzp2gODFhH4SA+5cZALDCtmgArCquGhbh5xA70sO0CgYEA3496
- b3KK1TAflUH/n3YEaXn+rp+tOLnLovOGPXlTzEvVWJCxoXcwpiV1fBuleENto9S/
- 90KQVhVh9XJFvSy+AiRlDNvKX3fEJ/x7YdDrWfoU2KiWpkydxrE1i6ZSRomPyFZO
- Ik7eMXXEaYS99I8EPGo7w/i3m4J1DMP78Fbqn/MCgYANu+ZxW4Sq0aGnbSSZZURE
- IWNVrQ0v93S5XZ336PZvz4j/I/+gmS2bEJ7t1mArZadRqPqeAgH5UAl/w3PkmdBl
- 5KFuf7VbPYmEP2F3SGbYMQMr/pLLUY94LG7fMXrs6Y3zdNiWzWFFRtA+GmAQ+Jvz
- IYcWz32da074SA3sg188fQKBgQCCAE80S6lL+2YCR5S8R8adB2IAbb4vRGuUYIRp
- bwo5vMddbxa8TDEwDIxbFUCNxLgXEvpmcIC6bki+kCrZrRD48e0JIy51gZHBpuKg
- qPqTIgfJTY/9OIRvLFF02czyU8AWwYlCDhbLMC59JcHIWvodn7ENbq5mceBbAgSZ
- aBGb3QKBgQCyzxau9wyntLPukNJMOSAPnvssBp3Op1mF+eOy9PnwqhlJMPeQnraO
- qeM3zYUcIsYUbEBE394mmbH4JexaqWLCU4p7tcMK15ALWnlk6QZqBwZqSNYYDvXK
- ahdK0isC35cOm+IFYBhLLJfjONZVzT1qj9cD+bxEIvSberA/urkLmw==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDU174r3LK+BzxnRA7aq3l5pw5Ca1vCYKGem2UbR74l0iivPmX74CJ24Xkqs+3uuXHk7DxceIFSpysDAWJUbMZg6/RgmEGBnOnAo9xIA/yOjnp3tkFE0xkAHeSCM4O9ZrsMinozlxdnYWYJ4VbvYdtpl3cHSUBSKqb2yL2BkGSKtxph1u4Op4CeAeI1VtaPEeH2FnxxaJ2T773wZgfkdssnsv2X7asUbJbUTC2IxvpLKKkMB26zMS56ZTdygl5sCvObcizqWZq1xVN7kzoNOArsg/tx3YOqphrm1gM3nlLAB33fBRyjIJsvkOYAY1wIxsUT8IYjRRgGvHtddq2CySP3
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: contrail-nfv.local
- cluster_name: cookied-bm-mcp-ocata-contrail-nfv
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: enp5s0f0
- compute_primary_second_nic: enp5s0f1
- context_seed: 5Y5caLTPMlq2bA5VpY8E1vXDt4ajJ6t4pVtClPXn0WCGNtM7GHw4qLYZknH2R1pt
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.126
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_hostname: cfg01
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_version: 3.0
- linux_repo_contrail_component: oc32
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface: enp5s0f0.${_param:tenant_vlan}
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.102
- opencontrail_router02_hostname: rtr02
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_smtp_use_tls: 'False'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_host: 127.0.0.1
- oss_pushkin_smtp_port: '587'
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: 3SjmVdpfyQfhBVhaTJ3t39EWxBWeUzMr
- salt_api_password_hash: $6$XLFCxibF$HqQC55s/Hl78vPrrpM8KJOfjXboakdS6ctgsEhO/DVWCN3ecxrg/TaLh0l2ieS6ukdBDurskX73FOIqz2Fs53/
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-environment.yaml
deleted file mode 100644
index fbf7d4a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-environment.yaml
+++ /dev/null
@@ -1,238 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-
- ctl01.contrail-nfv.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.contrail-nfv.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.contrail-nfv.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.contrail-nfv.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.contrail-nfv.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.contrail-nfv.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.contrail-nfv.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.contrail-nfv.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.contrail-nfv.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.contrail-nfv.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.contrail-nfv.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.contrail-nfv.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.contrail-nfv.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.contrail-nfv.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal01.contrail-nfv.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal02.contrail-nfv.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal03.contrail-nfv.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw01.contrail-nfv.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw02.contrail-nfv.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw03.contrail-nfv.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- mtr01.contrail-nfv.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.contrail-nfv.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.contrail-nfv.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.contrail-nfv.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.contrail-nfv.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.contrail-nfv.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
deleted file mode 100644
index 9332875..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail-nfv') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-- description: "WR for dpdk pci to be in correct quotes"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml;
- reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: WR for mounting 1G hugepages before linux.state
- cmd: |
- salt 'cmp*' state.sls linux.system.hugepages;
- salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
- salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "WR for correct opencontrail_compute_iface value. Cookiecutter context doesn't have such parameter"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.opencontrail_compute_iface 'enp5s0f0' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Lab04 workaround: Give each node root acces with key from cfg01"
- cmd: |
- set -e;
- set -x;
- key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
- salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
- cmd: |
- set -e;
- set -x;
- KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
- apt-get install -y sshuttle;
- sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
deleted file mode 100644
index c25aab3..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml
deleted file mode 100644
index a594a53..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 59a799e..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 106c3d5..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml
deleted file mode 100644
index 915981e..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
deleted file mode 100644
index 2f27c0f..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
+++ /dev/null
@@ -1,406 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ocata-contrail-nfv') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-
-{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-ocata-contrail-nfv_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
-
- groups:
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- network_config:
- ens3:
- networks:
- - admin
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
-
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp5s0f0
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp5s0f0
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
deleted file mode 100644
index d837e26..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
deleted file mode 100644
index 7bf4d2e..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-nodes:
- cfg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp5s0f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.101
- enp5s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.101
-
- cmp002.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp5s0f0:
- role: bond0_ab_contrail
- tenant_address: 192.168.0.102
- enp5s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
deleted file mode 100644
index b7e5829..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ /dev/null
@@ -1,323 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
-
-- description: WR Install cinder volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:volume' state.sls cinder
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail db on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail control on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' service.restart 'keepalived'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail client on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Test Opencontrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' cmd.run 'contrail-status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Hack vrouter (Delete default moun point)
- cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Temporary WR for correct pci in vrouter.conf
- cmd: salt "cmp*" cmd.run "sed -i 's/physical\_interface\_address\=.*/physical\_interface\_address=0000\:05\:00\.0/g' /etc/contrail/contrail-vrouter-agent.conf"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Remove crashes files from /var/crashes/ while vrouter was crashed
- cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Reboot computes
- cmd: |
- salt "cmp*" system.reboot;
- sleep 600;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create heat-net before external net create
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create heat-net'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create public network for contrail
- cmd: |
- salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create heat-router'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set heat-router public'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Steps from neutron client for contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Fix default security group for access to external net from outside
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-# Starting prepare runtest
-
-- description: Upload tempest template
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: runtest.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Include class with tempest template into cfg node
- cmd: |
- sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
- salt '*' saltutil.refresh_pillar;
- salt '*' saltutil.sync_all;
- salt 'ctl01*' pkg.install docker.io;
- salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
- salt 'cfg01*' state.sls salt.minion && sleep 20;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Enforce keystone client
- cmd: |
- salt 'cfg01*' state.sls keystone.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create flavors for tests
- cmd: |
- salt 'cfg01*' state.sls nova.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Upload cirros image
- cmd: |
- salt 'cfg01*' state.sls glance.client;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Generate tempest config
- cmd: |
- salt 'cfg01*' state.sls runtest;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download cirros image for runtest
- cmd: |
- wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Test future contrail manipulation
- cmd: |
- apt install crudini jq -y;
- crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
- crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
- crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
- cat /tmp/test/tempest.conf;
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
-- description: Run tempest from new docker image
- cmd: |
- OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
- docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Test Wait container script
- cmd: |
- report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
- if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
- then echo "All done!"; docker logs run-tempest-yml;
- elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
- then echo "Exit without report!"; docker logs run-tempest-yml;
- else echo "Tempest not finished... ";sleep 900; false;
- fi
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 25, delay: 30}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /tmp/test/
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_CTL01 }}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/runtest.yml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/runtest.yml
deleted file mode 100644
index f0d6d8a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/runtest.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-classes:
-- service.runtest.tempest
-- service.runtest.tempest.public_net
-- service.runtest.tempest.services.manila.glance
-parameters:
- _param:
- glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
- glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
- glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
- openstack_public_neutron_subnet_allocation_end: 192.168.200.220
- openstack_public_neutron_subnet_allocation_start: 192.168.200.130
- openstack_public_neutron_subnet_cidr: 192.168.200.0/24
- openstack_public_neutron_subnet_gateway: 192.168.200.1
- runtest_tempest_cfg_dir: /tmp/test
- runtest_tempest_cfg_name: tempest.conf
- runtest_tempest_public_net: public
- tempest_test_target: ctl01*
- neutron:
- client:
- enabled: true
- runtest:
- enabled: true
- keystonerc_node: ctl01*
- tempest:
- DEFAULT:
- log_file: tempest.log
- cfg_dir: ${_param:runtest_tempest_cfg_dir}
- cfg_name: ${_param:runtest_tempest_cfg_name}
- compute:
- min_compute_nodes: 2
- convert_to_uuid:
- network:
- public_network_id: ${_param:runtest_tempest_public_net}
- enabled: true
- heat_plugin:
- build_timeout: '600'
- put_keystone_rc_enabled: false
- put_local_image_file_enabled: false
- share:
- capability_snapshot_support: true
- run_driver_assisted_migration_tests: false
- run_manage_unmanage_snapshot_tests: false
- run_manage_unmanage_tests: false
- run_migration_with_preserve_snapshots_tests: false
- run_quota_tests: true
- run_replication_tests: false
- run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 03e966e..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,197 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAskZyhTySYWvGrp+dlv+q2La8oiM8Sv1JfQog8+UW28hGUbCq
- PnWa7bzVqENEY+g+qbQYh2Pvb2xURyY9+02TWLmCYzq7+orO1eG2SDt384YzDc6B
- nQohUbGwaSH2YsT/QA8KV1fvUW9P0gVEHmJWR8Jo3vdRyw+tEUszYkiTYkcVc9zt
- O5eYT9a6gSjLhl+tmJXu38jdWTcE8zJP+OGpJRDJpV2EAYwv+LXde9REv4KCGMqN
- lvli9IA+zarfMsAzSTPpL5ooslB20akKM1h5li3LG47OpKMG9sMVFGEEo7+lqTGa
- zUJEUPbJU95PqLfxoIOjYF/MhWKU5VALLraVwwIDAQABAoIBAHUAj0CqlFej6G3L
- DB6CBZrds8el4HhG6+hIsX/gprPwKVaL3+/GN7w35vHb1BLN5fJy5HZXPFmge1+G
- 1b8NFttwRQbjEWRJnZ352Sxm+z60oOU61w4+C8gWGnWleJMyP2PHez3/1G1Z5MUt
- 95sJZx8JlNJg9ymSTD/BXyPuBezFKf8jUSgPbhBv8B2yy82YGzqc9u7sK6BN90P1
- 3ZcynQ4cfieZLoim56dF9YEixr8plGmGpOspPZFlVCGIc1y2BC4ZUyDatcCa7/gQ
- 3aDdt9lkEfoCHezAFOmaZDCOZ70spkwCqXYk42BXpDjKF6djCXyx3WKVF+IhEOYT
- /S1I8KECgYEA1tzUZgW43/Z7Sm+QnoK3R9hG2oZZOoiTDdHyC+f5dk6maNbJbivM
- FAPIpzHtCyvBEiSgGmlqpUrwR2NbYnOHjOX72Yq7/e0Vl1XWmplKCsTDNFGXx5Fs
- 9AQbWjOF+drgfZ5p3dNyE9689nJZg5EhTxL8dfwnZat/l+/OKFO2tM0CgYEA1GhW
- 4FMsXe3/g7M0dj5FnbS8xjz93pei5YZanID9mY/RUcKbegdtfvtwsfkZe5trbI39
- jv96QyJeAnf48UDFwCV6SSZF/8Ez0pYHZanwkwKsli5uZdvSz7rUyVwE6tyO24WA
- Trgpmbb8uxhJHBNuD+bC/iGd1H0IUuJ65ChD9M8CgYEAxfp2z4boQZ2ZHw5LoHLr
- tIyJROPUJgYgEfToZaYbC7MOzL1Un2pFwg92fPCY7dkkLraGu690r9esLOtVEhNH
- zEFB3cJi1Gf3pBlZA9zJB8Ej6Pphs2bBkNqT5XpiMcZHYhhsjhQ+Iibz0NWuu3cn
- zPe+nmx4VMtAZ1x0hl4UlOUCgYBh8NaWS2O60AIwrRI0r5QUGwGsevUojU0Mfr4L
- SiMlir4e8BgW1ZP0qGYXel/4sfp/rJ1NMZojmJY2lzFPtOk6J03SqvY97s1BffJd
- O1X1w5bnDPPUvd7f8CsryeVuklLBADbglWSBP3IbpyAW9RKb/HDPE5seHqUW6t11
- lOd42wKBgBW0tTV6+aqao6o4ZBU0SVoNg9/PwgOubAzeSDW2+ArXn1sMmroSfXVw
- fbUTJI5TF/1pd7A5AKoR1FfTqlssoYlMGEPI6HJ4n9/4SqLpwd99HFW0ISt+EUMh
- Tqt9mDfKzwHxG2QTuOwyrslO8iTwRoN5OYgm4dsL471Obr4DILTz
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-bm-mcp-ocata-contrail.local
- cluster_name: cookied-bm-mcp-ocata-contrail
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: GAZbu3hguubkeYMg15AQ1J6UuY60TAh8h0EVpNUrHWhjRS2SxRYBuxxLvVURv82m
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.126
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_hostname: cfg01
- mcp_common_scripts_branch: ''
- mcp_version: 2018.3.1
- offline_deployment: 'False'
- opencontrail_version: 3.0
- linux_repo_contrail_component: oc32
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_smtp_use_tls: 'False'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_host: 127.0.0.1
- oss_pushkin_smtp_port: '587'
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: frJMLJsfGkFXCk4qTTEttKla518Akvdp
- salt_api_password_hash: $6$ixefPtcd$vasKuySO6L2JM0FKaB/udsQvH4upI2dWoJZaR9XTqeAx4UMvkHsNNSwsALVqTTLbXIngkFqYNXpbNm1o4iMGS.
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: prometheus
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
deleted file mode 100644
index 02bcbf2..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
+++ /dev/null
@@ -1,191 +0,0 @@
-default_context:
- mcp_version: testing
- cicd_enabled: 'False'
- cluster_domain: cookied-bm-mcp-ocata-contrail.local
- cluster_name: deployment_name
- compute_bond_mode: active-backup
- compute_primary_first_nic: enp5s0f0
- compute_primary_second_nic: enp5s0f1
- context_seed: WCQ00jbWQE6qxjDdhHsS7SNGExTJ9HVanC9LXyJHF2IIe0Qj6vtaXFP5FSwEK6jm
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.126
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth2
- infra_primary_second_nic: eth3
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 10.167.8.91
- maas_hostname: cfg01
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.100
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
-
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_compute_count: '1'
- openstack_compute_rack01_hostname: cmpt
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
-
- openstack_compute_node01_hostname: cmp001
- openstack_compute_node02_hostname: cmp002
- openstack_compute_node01_address: 10.167.8.101
- openstack_compute_node02_address: 10.167.8.102
- openstack_compute_node01_single_address: 10.167.8.101
- openstack_compute_node02_single_address: 10.167.8.102
- openstack_compute_node01_deploy_address: 172.16.49.73
- openstack_compute_node02_deploy_address: 172.16.49.74
-
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_version: ocata
- oss_enabled: 'False'
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_master_address: 10.167.8.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- tenant_network_gateway: ''
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- openstack_nova_compute_hugepages_count: 40
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
deleted file mode 100644
index c37939c..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_ctl
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
deleted file mode 100644
index 1fa16cf..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_TRUSTY_IMAGE_URL with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_XENIAL_IMAGE_URL with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{%- if CUSTOM_VCP_TRUSTY_IMAGE_URL != '' %}
-
-- description: "Change trusty image to custom"
- cmd: |
- echo "CUSTOM_TRUSTY_IMAGE is {{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}";
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_trusty_image "{{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{%- endif %}
-
-{%- if CUSTOM_VCP_XENIAL_IMAGE_URL != '' %}
-
-- description: "Change xenial image to custom"
- cmd: |
- echo "CUSTOM_XENIAL_IMAGE is {{ CUSTOM_VCP_XENIAL_IMAGE_URL }}";
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image "{{ CUSTOM_VCP_XENIAL_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{%- endif %}
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Update minion information
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun openssh after env model is generated
- cmd: |
- salt-call state.sls openssh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Lab04 workaround: Give each node root acces with key from cfg01"
- cmd: |
- set -e;
- set -x;
- key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
- salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
- cmd: |
- set -e;
- set -x;
- KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
- apt-get install -y sshuttle;
- sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
deleted file mode 100644
index 795f98a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml
deleted file mode 100644
index a594a53..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
deleted file mode 100644
index cde8295..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 106c3d5..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
deleted file mode 100644
index 915981e..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
deleted file mode 100644
index 3de4ae9..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ /dev/null
@@ -1,495 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set CUSTOM_VCP_TRUSTY_IMAGE_URL = os_env('CUSTOM_VCP_TRUSTY_IMAGE_URL', '') %}
-{% set CUSTOM_VCP_XENIAL_IMAGE_URL = os_env('CUSTOM_VCP_XENIAL_IMAGE_URL', '') %}
-
-#{# set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' #}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ocata-contrail') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-# {# set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) #}
-# {# set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) #}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-# {# set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') #}
-# {# set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') #}
-# {# set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') #}
-
-{% import 'cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-ocata-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- # default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- # virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- # virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- #ip_ranges:
- # dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
-
- groups:
-
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- #ens4:
- # networks:
- # - private
-
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
-
- # - name: {{ HOSTNAME_CFG01 }}
- # role: salt_master
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_cfg01
-
- # interfaces:
- # - label: enp3s0f0 # Infra interface
- # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- # - label: enp3s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- # network_config:
- # enp3s0f0:
- # networks:
- # - infra
- # enp3s0f1:
- # networks:
- # - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- # - label: eno1
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- # - label: eno2
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- # eno1:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
-
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: enp5s0f2
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp5s0f0
- - enp5s0f1
-
-
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- # - label: eno1
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- # - label: eth0
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- # - label: eth3
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- # - label: eth2
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: eth4
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
- # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp5s0f0
- - enp5s0f1
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
deleted file mode 100644
index 4d9af8c..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
deleted file mode 100644
index d2d414b..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
deleted file mode 100644
index c137d12..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ /dev/null
@@ -1,206 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEogIBAAKCAQEAvDqzt/PHWvSSJrBtvD3UWp21CDrAajgOPeXrVm7VU+sDDpw0
- YqDvVhYfT/N6ha+SWOK00KyTuMMbB8/I4tvsP9vvCXy7v2AJID/ZO2z/t8KfTDEJ
- C75/6a0UBg6sl3i7+cUOHbeK+lkcfdnSI1H8Jzdhk4Xj7t7q+MIKTs5n+AlScgyz
- NSiD2nG/U5LmaQ+rjZ1VsF9J0YTds0yLDF3hztVoDTs7j5obl7Xab3ZlwalgH5Gc
- Id6BI09jkUbppdPzHpzV2oad7cFpVYTt9m3/MMT0amzPuwl/u/dI64rRuWPe60eT
- qeVMQD0zP6o9F79upbzQStt82lPJcfF4CXvxYwIDAQABAoIBAAHUXDzUfMKQj/8a
- RebHfxHmaIUM9SPTKahGXNQ5PY+UQDJbKFMxF0Jx8pn3VuCHxVdh1LoWg1UPaGra
- BSzXUGOKgrdH5BdHGq+aj0T5mT6zAJNgAqN/lYSy7vfkGp9aSBF0bd+yEgK+7Pz4
- Kge320iSTDt/2KhQuF30nN8JOI97m2uk2YHH8TixfVtmgLPEy+0Mw4VZLsHD4OY1
- zu8xN6co2aQR0DB0MPKD6IxH62wSOJKBzF4o5xzzy/fl0ysDZbZ8Z/5Rejvp3yNT
- 68B0X5CM27hVdYE+/tcKGl9WKmewIf3fTZUfBcwFIObMIl9fkK/519nwFed4AfOX
- /a2LCBECgYEA9Lyl/eyzXuU2dgs6Gw/WMobqOVnHF9wbukS1XSKdwMogtpt7Pb23
- +32r9xHgeRDvvWwSp8lNPZ8mu77dQ6akbOuOk5C6V3Mqt4zam3DBDMtL63nKq8tq
- LQ0PVjj8cAgu3GSDCz7htqUb44rn5tX9zlM0vrwxzyYqbet7ZbsGoYsCgYEAxORQ
- JFn1vwag8VBw3bngx3SJ46CpCC8Gz830W7pEaTS6zTTiDC4p5sATGya91JS8l47G
- ikP2bcWzvT6aP/u+TZSqZiqp5Kn37fx8Et+ltIl47SH+PJHIR9F9r2f9zqla6mlP
- zcX/mTSuAJCTP4whQA3/f1wNAeBnewhK9fXCOokCgYAz6TPYSXW+giXsIfOAO/q2
- GvHhmk5lnDVxbBOAHtCRTQ5lTVN1xCEbQgvQy0TuyQ3hAuRuHH+6u2BO4Gw0Zkto
- IwrJ+B/eXrpH1qOj5uW73f9Lgjjf+bSau7NuGYZKCSJPcy5smzjrMdhZimQoDWnZ
- csK0VlzGUJUdXZ599I6ygwKBgGTf+LN3J7H0Snb4WKsw9Zoa+h6WjKO1vE6xXVW1
- rCEes+o5Autsp2ki1WcexTlp7unTa6MhSNta5Ei8Dzli2FBVL6xihWKzNmRG7Kaa
- 0QIbQMp1lRUhN7Sb/0HkDKRaHktlI07w95Bd7hw59kcjm1F/Gnz9A2kHuNzPFeDI
- RffJAoGAdeCID5sb0oHEHTIxxB+cgfaiyaAe9qrW2INNWLVn5OTDh6cidatnWAor
- M/SxwNoiYcCpi869q7wzjw5gNOVoNJbmwzDA7s+lgjTPQpq2jmO6RtweKbYoN5Zw
- ++LiD3r07TD3p2QAyeooT29D/d6/2Hd6oyTJcZWIQTN+MTcXQO4=
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8OrO388da9JImsG28PdRanbUIOsBqOA495etWbtVT6wMOnDRioO9WFh9P83qFr5JY4rTQrJO4wxsHz8ji2+w/2+8JfLu/YAkgP9k7bP+3wp9MMQkLvn/prRQGDqyXeLv5xQ4dt4r6WRx92dIjUfwnN2GThePu3ur4wgpOzmf4CVJyDLM1KIPacb9TkuZpD6uNnVWwX0nRhN2zTIsMXeHO1WgNOzuPmhuXtdpvdmXBqWAfkZwh3oEjT2ORRuml0/MenNXahp3twWlVhO32bf8wxPRqbM+7CX+790jritG5Y97rR5Op5UxAPTM/qj0Xv26lvNBK23zaU8lx8XgJe/Fj
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.11.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.11.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.11.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.11.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAshiE2vK11KH1/PHO9v5IcT1ol3kuAorv6IgW+1paT9w4pFnd
- H2DHQxTJsZ629cig+ELVAKHQnkND2U++/DM20ai5ZfpOwlvd+dL95/FbGb62Ozxx
- kxBjyc/Bbbs8LcZtS1SN+agdkjQG1StpckUbFppoJ9nzWgnEcdYdonQ6aThgd+YL
- rAOX04s3cMlCflClQl3lGFo24Qdhk/Y4M5rodfqfD5NOSKEhYP/dTMunri8zB5bU
- ifvOvCWUKUOxLjkx95raY82xMHUobMYk87RcLPcq8pyz96/FPoiLqxM1oznTKNiI
- 0bW0xjf7FFjfLCjTapKZPRz8+Wkvzmzj35LLrwIDAQABAoIBADJoGCo0Kdy93nay
- JgboX66VV+YPaUNU+aQR6JdJsmgKB4oU2S4JYTyORKveJSCZoV3C5LCiG/6/QRPf
- q0mMYUaj/51qZCJEiCYuXqjoOgWmYcOQTwD10ZiMEc4yAU1fbQ22J9zyhTQdP5XU
- DKtH/eu+1h35ZRQl0ZD6rjaNuP6QekJM6IVCC7XBaCG5+wSER9R25HbbDhdb7CwH
- W1GP9IgISqy9Z3f4PQOyCUmn/O99lN8kry6ui7bCywRfITV6C+pn02DpMgzKZ8jn
- 3yts1f2mIbYVxnahtCaI3QTag6wBsnFq+U0uIXrUGMeeRzg9N1Ur01qdJpIR9g0v
- Nt7QUZkCgYEA4lEavsFitSll/33JY4q82nYpjXAxTON3xraUqNYn5Cde06gNi8n1
- t9TCLUqDhSpvgEOyJE/uwo5LAj79Ce2EwLkCttNggqRXBlY5ZpljwfWmxZtuGm/z
- BJaOtkaK/1diR/+Qn/fTMyPH5JIXuQ6/XF60W4MSbzPgY4GO1BDx+G0CgYEAyXRT
- 00GDdiXbxQmzeHTO9Bg5y36Y1FEWDLnc89bpHPTR4sT/XCczerevy/l8jsdzZlnu
- 5ZddfWMF7EGNo51Zbmi0oLQ7nzigoVFcnhFHRFoCP36T9mvJk7O8Ao3ttpl/J2r0
- mFiaKi0lhmZVbNpmliKjWAMZJyt6I7AfYekcOwsCgYEA0W8MuQptNgkhgtX80ElL
- iz9eJK12chjuds3vtG66a8CjWGtkXcB/y6bwKsmR/GHQ3XnIGSJ/aTwU3fc8YzuS
- ZmbPxDDIVx2OCycv52p7jrqtoqC7u2tuEQji+Hs/lhxfrxEp3V+R6vlpunQX0AF9
- xRU/ApDBNndjZ7I1YrprseECgYA+zx8HgaiMIJeZokGrb7fKkPcMBCeAItveEeDa
- wYmito3txv/a6nn5a+XKkbmNBpBrO+To/j1ux33kQDf56Cgm7UxLwoXISa6DPUvE
- GJ0AqZOD2mIldUu+2k3m+ftAcDEdyBIEobNHLRZDBgriSmGrs5b77NNdzAdjsxjF
- vRlJKwKBgD8DcP/C9pABC2mRQyH//RTk6XZfiDY0L18lwH7acEdHlJiF1PTwvIHD
- cj1nMyG2MxEiSt1E5O/YQ4Lo3sognFIb8keu7IYxEgLXhvWFR3RwaYCjrF4ZGfD2
- +83eUFPZQvEwTY/8OCogzJQfs1CT8+pLdO9tZQbrAaxfmF6c48KN
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
- cluster_domain: cookied-bm-mcp-ovs-dpdk.local
- cluster_name: cookied-bm-mcp-ovs-dpdk
- compute_bond_mode: balance-slb
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.11.0/24
- control_vlan: '2416'
- cookiecutter_template_branch: proposed
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.62
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.0/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: obutenko@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.11.241
- infra_kvm01_deploy_address: 172.16.49.11
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.11.242
- infra_kvm02_deploy_address: 172.16.49.12
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.11.243
- infra_kvm03_deploy_address: 172.16.49.13
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.11.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.15
- maas_hostname: cfg01
- mcp_version: testing
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: deploy-name.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openstack_benchmark_node01_address: 10.167.11.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 10.167.11.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.11.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.11.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.11.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.11.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.11.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.11.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.11.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.11.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.12.6
- openstack_gateway_node02_address: 10.167.11.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.12.7
- openstack_gateway_node03_address: 10.167.11.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.12.8
- openstack_message_queue_address: 10.167.11.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.11.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.11.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.11.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nfv_sriov_network: physnet2
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_cpu_pinning: 6,7,8,9,10,11
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vlan
- openstack_ovs_encapsulation_vlan_range: 2418:2420
- openstack_proxy_address: 10.167.11.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.11.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.11.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.11.19
- openstack_version: pike
- cinder_version: ${_param:openstack_version}
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
- salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
- salt_master_address: 10.167.11.2
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.2
- shared_reclass_branch: proposed
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.12.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.12.0/24
- tenant_vlan: '2417'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- sriov_network_subnet: 192.168.10.0/24
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
deleted file mode 100644
index 61db010..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-nodes:
- cfg01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_static_ctl
-
- # Physical nodes
- kvm01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- - openstack_compute_sriov
- interfaces:
- enp5s0f0:
- role: combined_vlan_ctl_mgm
- single_address: 10.167.11.105
- enp3s0f0:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.0"
- tenant_address: 10.167.12.105
- enp3s0f1:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.1"
- # Remove this interface after switching to reclass 1.5.x
- # in favor to system.nova.compute.nfv.sriov
- enp5s0f1:
- role: sriov
-
- cmp02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- - openstack_compute_sriov
- interfaces:
- enp5s0f0:
- role: combined_vlan_ctl_mgm
- single_address: 10.167.11.106
- enp3s0f0:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.0"
- tenant_address: 10.167.12.106
- enp3s0f1:
- role: bond_dpdk_prv_lacp
- dpdk_pci: "0000:03:00.1"
- # Remove this interface after switching to reclass 1.5.x
- # in favor to system.nova.compute.nfv.sriov
- enp5s0f1:
- role: sriov
-
- gtw01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- enp3s0f0:
- role: single_mgm
- deploy_address: 172.16.49.5
- enp3s0f1:
- role: bond0_ab_dvr_vlan_ctl_prv_floating
-
- gtw02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- deploy_address: 172.16.49.4
- enp3s0f1:
- role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-vcp-environment.yaml
deleted file mode 100644
index fc6079b..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-vcp-environment.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-nodes:
- ctl01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
-# mtr01.cookied-bm-mcp-ovs-dpdk.local:
-# reclass_storage_name: stacklight_telemetry_node01
-# roles:
-# - stacklight_telemetry
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
-# mtr02.cookied-bm-mcp-ovs-dpdk.local:
-# reclass_storage_name: stacklight_telemetry_node02
-# roles:
-# - stacklight_telemetry
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
-# mtr03.cookied-bm-mcp-ovs-dpdk.local:
-# reclass_storage_name: stacklight_telemetry_node03
-# roles:
-# - stacklight_telemetry
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
-
- cid01.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
deleted file mode 100644
index ce9318e..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: "Workaround for PROD-22201 (remove after switching to reclass 1.5.x) - Remove linux.network.interface object from the system models and use fixed 'environment' model instead"
- cmd: |
- set -e;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Temporary workaround for removing virtual gtw nodes
- cmd: |
- sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: WR for mounting 1G hugepages before linux.state
- cmd: |
- salt 'cmp*' state.sls linux.system.hugepages;
- salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
- salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: WR for correct acces to git repo from jenkins on cfg01 node
- cmd: |
- git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
- git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
- chown -R git:www-data /home/repo/mk/mk-pipelines/*;
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
- salt '*' saltutil.refresh_pillar;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Add cpm nodes to /etc/hosts
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.105 cmp01.cookied-bm-mcp-ovs-dpdk.local cmp01' >> /etc/hosts";
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.106 cmp02.cookied-bm-mcp-ovs-dpdk.local cmp02' >> /etc/hosts";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Give each node root access with key from cfg01"
- cmd: |
- set -e;
- set -x;
- key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
- salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48bf712..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 44ae1f5..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml
deleted file mode 100644
index b39b37a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
deleted file mode 100644
index 15e22ba..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ /dev/null
@@ -1,494 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ovs-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
-{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.11.253') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
-{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.49.3') %}
-{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.49.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
-{% import 'cookied-bm-mcp-ovs-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-ovs-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-bm-mcp-ovs-dpdk/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
- default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
- default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- ip_ranges:
- dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
- params:
- ip_reserved:
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.12.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
- params:
- ip_reserved:
- gateway: '172.17.42.129'
- ip_ranges:
- dhcp: ['172.17.42.130', '172.17.42.180']
-
- groups:
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
- private:
- parent_iface:
- phys_dev: !os_env CONTROL_IFACE
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp5s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
- - label: enp5s0f0
- l2_network_device: admin
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP01
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP01
- network_config:
- enp5s0f0:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp5s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
- - label: enp5s0f0
- l2_network_device: admin
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP02
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP02
- network_config:
- enp5s0f0:
- networks:
- - admin
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_GTW02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
index c75c6d5..42d6de1 100644
--- a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
@@ -276,7 +276,7 @@
- description: Download cirros image for runtest
cmd: |
- wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+ wget http://172.19.112.216:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
node_name: {{ HOSTNAME_CTL01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
index f0d6d8a..85d4d67 100644
--- a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
@@ -4,9 +4,9 @@
- service.runtest.tempest.services.manila.glance
parameters:
_param:
- glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
- glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
- glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+ glance_image_cirros_location: http://172.19.112.216:8099/cirros-0.3.5-x86_64-disk.img
+ glance_image_fedora_location: http://172.19.112.216:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ glance_image_manila_location: http://172.19.112.216:8099/manila-service-image-master.qcow2
openstack_public_neutron_subnet_allocation_end: 192.168.200.220
openstack_public_neutron_subnet_allocation_start: 192.168.200.130
openstack_public_neutron_subnet_cidr: 192.168.200.0/24
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/core.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/core.yaml
deleted file mode 100644
index c0c50a4..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/core.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Approve cfg01 ssh key for jenkins user
- cmd: mkdir -p /var/lib/jenkins/.ssh && ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && chown jenkins /var/lib/jenkins/.ssh/known_hosts
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Install jq for parse json output
- cmd: apt install -y jq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml
deleted file mode 100644
index 0b4d3d0..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml
+++ /dev/null
@@ -1,145 +0,0 @@
-nodes:
- cfg01.ocata-cicd.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- cid01.ocata-cicd.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.70
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.91
-
- cid02.ocata-cicd.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.71
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.92
-
- cid03.ocata-cicd.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.72
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.93
-
- kvm01.ocata-cicd.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.ocata-cicd.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.ocata-cicd.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.ocata-cicd.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- - openstack_compute_sriov
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.101
- enp5s0f0:
- role: single_ovs_dpdk_prv
- tenant_address: 192.168.0.101
- dpdk_pci: "0000:05:00.0"
-
- cmp002.ocata-cicd.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute_dpdk
- - features_lvm_backend
- - linux_system_codename_xenial
- - openstack_compute_sriov
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.102
- enp5s0f0:
- role: single_ovs_dpdk_prv
- tenant_address: 192.168.0.102
- dpdk_pci: "0000:05:00.0"
-
- gtw01.ocata-cicd.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- enp9s0f0:
- role: single_mgm
- deploy_address: 172.16.49.75
- enp9s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-
-# gtw02.ocata-cicd.local:
-# reclass_storage_name: openstack_gateway_node02
-# roles:
-# - openstack_gateway
-# - linux_system_codename_xenial
-# interfaces:
-# enp10s0f0:
-# role: single_mgm
-# enp10s0f1:
-# role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
deleted file mode 100644
index 143a83f..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-nodes:
- cfg01.ocata-cicd.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- # Physical nodes
-
- kvm01.ocata-cicd.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.ocata-cicd.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.ocata-cicd.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_mgm
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.ocata-cicd.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.73
- enp2s0f1:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- single_address: 10.167.8.101
- tenant_address: 192.168.0.101
-
- cmp002.ocata-cicd.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute
- - features_lvm_backend
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.74
- enp2s0f1:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- single_address: 10.167.8.102
- tenant_address: 192.168.0.102
-
- gtw01.ocata-cicd.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- enp9s0f0:
- role: single_mgm
- deploy_address: 172.16.49.75
- enp9s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-
-# gtw02.ocata-cicd.local:
-# reclass_storage_name: openstack_gateway_node02
-# roles:
-# - openstack_gateway
-# - linux_system_codename_xenial
-# interfaces:
-# enp10s0f0:
-# role: single_mgm
-# enp10s0f1:
-# role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
deleted file mode 100644
index c8612bb..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
+++ /dev/null
@@ -1,199 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEAxhMRv9nCgnLGtWFLfq8r97sPjEIA3eNvVfYWV87S2TjtiUyT
- Bknw/eXC0l2SGN3q7NXaA6sqtiVgk2dw0DfhGOKuWucr5C3pyRDMZOpj98gNzwNX
- X6kUmqS8MjALcpw68qVnq0cuQ0RNY8gxbLGIGWZx29AdcHtczWqce3LXDllVLp/U
- XLIcZuoB3Z2JvKWMo+QF7bKFJB8+O6tBXuRDJblVQFds/dG6cdLXGOTu3TvD/iu8
- FcDOPDqTff7j2669DmTV6NU2lzuCd5WOIaRmX0bmyOripMtTRZvj83W3ZvLZO/7I
- L9sXB1S/IpL09P1Fq5rMK3rDXBAS7CY/fBOBUwIDAQABAoIBAAyR4zt4l0ZuADw8
- e20Dstea0GbHPYTXTRZ4cnyKDIlqenCPInlwsdF7Bj0RYRcg5iq3e4lmEGTUxGc2
- VktwcGGC6SutpfRqEX8ICOCSm1t7H502ihHxCfSyZZsNv3w7e+YwJv8Qmlf8eqjN
- aI2MSqXKAYnAkE22FnKWxG11IT6wwgIEB01z5DbnNv3FToIOGAnX1wB8cOP5IS5C
- E89Q9X6YqB/k+L5Xms+WfkHiqDBlkGfH1M97CmjDrR1wDapqCxSVjt/qIaSBoyMi
- NiXweo+P1ZZs9myQbOMX/mS8svCEVM/EiQe6M+K4wSNZNxIzZWCozMHYTAQRIcfF
- SRIMMrECgYEA65ExKZuoi2MGTdqFH04sZuz6sH3hXI7er2ol6mmhXZALJWd+Sk9a
- j/eq9HKBGhc41LeowIYrcwMiUlXhoZ17AiGALgG1UG5btomvwADd9DuzmJYaZ8VO
- xiLG5Y1czIyh4JapPOdlP0oj2Sy+nlIsCCpmmYbZK+qvbkT0nnWJzskCgYEA10FY
- rENlLZ6L34f4c9Ow+GpI/z3+QP6By3xwOQTZOLV+gBQeOHO45TfIXT2hsAXbfcAA
- 0pvXeKhQ89a7A24L/3UCex/gV4BivghIVai1Lh6/++LC4s3Ue68+CQCm4Q1zTDqk
- GTVtEH7r2Bq9Wm08vPpkLwiHYOJhVGTGGvEpkTsCgYEAlv2WjHvoeYd8Z/ST9W/v
- B/4H5Y6aH/C3jao1KV6Rft4wNsZybYEVpEf0fQDT/Xw7AiXCdbzKJssweaPwnt3J
- FaGRfmu74xUJliQE0cX8fmqyADDeNfuDNX7fDA4jGD1gGQuY6J/NBtcnyTFj8Sjs
- bkN3RhroIr0nuz9ZqCPgs/kCgYEAt1s5fltWRzrDaOA4uek72Q8oKQuUlaZ1x3Cz
- Y06G/jBTliQM7gddGxueOBZ0sSz8H6y6xqvrKiMt+dcSrEREQhFY0KqBfeeltLv3
- acfwtV2KKbSqT2oHMmg/DooYnKHJcciN2c9RnPiQSx/T5cAhOdSMHChGsTeEss+4
- lGCTCNsCgYAm8R8A3XZ2TPl0iMM9LxUJGXBFs5r/GkeM1XMXQmnNDzoDbaLhkj/F
- 7UayG1rOOYl64oYmGZ4UPYTzyDDcb4m/ZiqQsoAmNjXoKykvMhhBnGUb4CZj0J/b
- QppRJ86CYR17df+W+4TLhXcn41afuO+8TCBLUKpb1jeJ5hifWctK5Q==
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGExG/2cKCcsa1YUt+ryv3uw+MQgDd429V9hZXztLZOO2JTJMGSfD95cLSXZIY3ers1doDqyq2JWCTZ3DQN+EY4q5a5yvkLenJEMxk6mP3yA3PA1dfqRSapLwyMAtynDrypWerRy5DRE1jyDFssYgZZnHb0B1we1zNapx7ctcOWVUun9Rcshxm6gHdnYm8pYyj5AXtsoUkHz47q0Fe5EMluVVAV2z90bpx0tcY5O7dO8P+K7wVwM48OpN9/uPbrr0OZNXo1TaXO4J3lY4hpGZfRubI6uKky1NFm+Pzdbdm8tk7/sgv2xcHVL8ikvT0/UWrmswresNcEBLsJj98E4FT
- cluster_domain: ocata-cicd.local
- cluster_name: ocata-cicd
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: azAQwe19yxcUFK03MqWVyrJtQ6ZblWAFaDDiITqT9ed7jJnj6EdHQpgyizt1Gxdg
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.126
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: ocata-cicd.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.8.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 192.168.0.6
- openstack_gateway_node02_address: 10.167.8.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 192.168.0.7
- openstack_gateway_node03_address: 10.167.8.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 192.168.0.8
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
- openstack_nfv_sriov_numvfs: '7'
- openstack_nfv_sriov_pf_nic: enp5s0f1
- openstack_nova_compute_hugepages_count: '16'
- openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nova_cpu_pinning: 1,2,7,8
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: KEfAXxIWJykUBv0v8uKWdI2v4cBG5A07
- salt_api_password_hash: $6$XBCrfheG$2q48l7h1giiqF2sdp7CFtLQQi8pcMa6K5A8cPYQmhuGqJtzv08YXVqYyhkHARzl1VBLVf.aTMY6d0M5naM5WU0
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_refspec: refs/changes/44/16144/1
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
deleted file mode 100644
index e7584f8..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
+++ /dev/null
@@ -1,274 +0,0 @@
-default_context:
- bmk_enabled: 'True'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEA0i/Kw7U6RvH3ELsRZwtfX5ZSaV2tkRiWqCgpSK7fvZn/YYuy
- f+DiMuviyb1a2rmgQgckJjGlQeEdtETyh+lAYUMCxcJUqiPMNtbs4GDqqpc91Nv+
- /Qa5JXGuInQC4L5MAx2BeJ4Swc1jnDMLXZ21zmcZwK3Uo5ENyhQqlwe2QZ+SzH76
- U7DQgSCU3xlI/ieBDiC4w4HLj3z3X36J6bb3x33DVh02UCaWCwdU0v+PGW+v4RBt
- RYMAuCnErOXGQysgwcpyqSpBvZ6QRdktIeAyK5X8RVLx7Tta5BPVB8jbseF/0N2C
- wxmovDbnUzJMF8lRUuuqXzKPfXeHjDbWWOpzeQIDAQABAoIBAQCdnXsmwWr/Mol8
- QVuucwfFDVulabszWEX5uYwj1gcwDiBFDNYBMSlO2DbL47QS2ypC+UnxXj83pNF6
- kk1w/8foZ7DXjX+hypBj+03MgMDYxet3CLYxFe6XFqVnbqmN0QX9PA4P3jHgpN1y
- j5CRVie0rPnR1Ejlk2vMwsyhTwYsNFmCqW0IC5qWHPyMGiOnFTcCYUIA8IZGkbvS
- dYdVavAxwLVgmBoWVegg7eOL6hB3W1BZHxra/TEiHy0gsTSMr/l8YRqsDX8A91D1
- mOMYqaEfqtJMuAKQwJDsXFiqXjsctQyUcx8TcPgsDTB/B8OPt9g8JdcmUUnWMaa9
- mj6i9VdVAoGBAPtdkkgKGPHxq4bxWKrwzaI1gUPlPikxA+rVeplNvF0pzt4yUHD6
- l56ash7Yl6E3n1s2mLh4IMYw8a5QZffHf51cQBtHkg72u4dXVUWLUGIzwsIkx6Zu
- kmIfWCkC3uMbcE7o1pQSGgc3nUUeD4K6KBqUWxinEzCIYu3a909MANL/AoGBANYP
- 2/troZi9SfYkxF/ZdCeufDsSj4DZICzG7wbKMUJ1jDpegPBT4D8d1Nip7tj10q/P
- 4HdRrv0p5ZsFgtua9Una+ICpjpkEqjGgHUBBD7o/+Jjd8cmE57DSNSMUTVY8mwi3
- OuKNI5itlU/8gwrAtEjV5qcgsUQSH6zZm69sx9GHAoGAb78ee/Y5Nr4YYipDH+Nj
- l3wv1k5AfzFyK2DyWtrrJYOjmrZFeqR86R6elwX1Cs/egT9ZT5DgCRvTJYpKeofv
- HLbZLQd5UDuyDK3vk7YmazHVoFeXhk7bttF2cOz8x3v4RqxOUI9nkNPRj9uYS6aU
- k5RmlyQXbNkFGfbhQRhpuWMCgYBqPFKzk3YOhJrJQvQGkbgY3XqgIpT/oEJclpoX
- 547M6eOfMDmTjDz1dgulP6jfGhjm0icXcH2E/R8LcY/BB0WG0tqTmBLCFFOW71y0
- /9UbXRY2X6fYmFIYKire7vt2uftDNxRNTTiGVFeO68XpNzwCDc470XjITKpVsWWX
- iUgdzwKBgQCs+hCR03K+6npMnANOBEi4I91pJQlbcMBK5fvX1w4VAwBIEu4Z58JM
- kBbpgmANffYjTWxMIEGvdq9yZWwKCw7pv8pu/dU63MS5ST10K+7/QaLejabCskzP
- Hjy15CfE5Iy/6aBIEz30uW/B/THd0icbGKSaFjor+sW7S2GqJvE9ow==
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL8rDtTpG8fcQuxFnC19fllJpXa2RGJaoKClIrt+9mf9hi7J/4OIy6+LJvVrauaBCByQmMaVB4R20RPKH6UBhQwLFwlSqI8w21uzgYOqqlz3U2/79Brklca4idALgvkwDHYF4nhLBzWOcMwtdnbXOZxnArdSjkQ3KFCqXB7ZBn5LMfvpTsNCBIJTfGUj+J4EOILjDgcuPfPdffonptvfHfcNWHTZQJpYLB1TS/48Zb6/hEG1FgwC4KcSs5cZDKyDBynKpKkG9npBF2S0h4DIrlfxFUvHtO1rkE9UHyNux4X/Q3YLDGai8NudTMkwXyVFS66pfMo99d4eMNtZY6nN5
- cluster_domain: ocata-cicd.local
- cluster_name: ocata-cicd
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TyDcKDMkhMxUlgpgsB0sXCfWQGyBdEJDIBpcRnfTlwS89gRFQZBQbyxYB9gy0kcQ
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.126
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: sgudz@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 172.16.49.66
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: ocata-cicd.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.8.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 192.168.0.6
- openstack_gateway_node02_address: 10.167.8.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 192.168.0.7
- openstack_gateway_node03_address: 10.167.8.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 192.168.0.8
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_address: ${_param:stacklight_monitor_address}
- oss_cis_cert: ${_param:oss_openstack_cert}
- oss_cis_domain_id: ${_param:oss_openstack_domain_id}
- oss_cis_enabled: 'True'
- oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
- oss_cis_jobs_repository_branch: master
- oss_cis_password: ${_param:oss_openstack_password}
- oss_cis_project: ${_param:oss_openstack_project}
- oss_cis_username: ${_param:oss_openstack_username}
- oss_cleanup_service_enabled: 'True'
- oss_cleanup_service_os_credentials_path: ${_param:oss_openstack_credentials_path}
- oss_cleanup_service_password: ${_param:oss_openstack_password}
- oss_cleanup_service_project: ${_param:oss_openstack_project}
- oss_cleanup_service_project_domain_id: ${_param:oss_openstack_username_domain_id}
- oss_cleanup_service_username: ${_param:oss_openstack_username}
- oss_enabled: 'True'
- oss_node01_address: ${_param:stacklight_monitor_node01_address}
- oss_node02_address: ${_param:stacklight_monitor_node02_address}
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_from: sgudz@mirantis.com
- oss_webhook_recipients: sgudz@mirantis.com
- oss_openstack_auth_url: http://172.17.16.190:5000/v3
- oss_openstack_cert: ' -----BEGIN CERTIFICATE----- MIIE0DCCA7igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
- EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT AAGjggEaMIIBFjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
- 91cxG7685C/b+LrTW+C05+Z5Yg4MotdqY3MxtfWoSKQ7CC2iXZDXtHwlTxFWMMS2 -----END CERTIFICATE-----
- -----BEGIN CERTIFICATE----- dGhvcml0eSAtIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv3Fi
- 9kIDN0zmiN/VryTyscPfzLXs4Jlet0lUIDyUGAzHHFIYSaRt4bNYC8...'
- oss_openstack_credentials_path: /srv/volumes/rundeck/storage
- oss_openstack_domain_id: default
- oss_openstack_endpoint_type: public
- oss_openstack_password: nova
- oss_openstack_project: admin
- oss_openstack_ssl_verify: 'False'
- oss_openstack_username: admin
- oss_runbook_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEA0i/Kw7U6RvH3ELsRZwtfX5ZSaV2tkRiWqCgpSK7fvZn/YYuy
- f+DiMuviyb1a2rmgQgckJjGlQeEdtETyh+lAYUMCxcJUqiPMNtbs4GDqqpc91Nv+
- /Qa5JXGuInQC4L5MAx2BeJ4Swc1jnDMLXZ21zmcZwK3Uo5ENyhQqlwe2QZ+SzH76
- U7DQgSCU3xlI/ieBDiC4w4HLj3z3X36J6bb3x33DVh02UCaWCwdU0v+PGW+v4RBt
- RYMAuCnErOXGQysgwcpyqSpBvZ6QRdktIeAyK5X8RVLx7Tta5BPVB8jbseF/0N2C
- wxmovDbnUzJMF8lRUuuqXzKPfXeHjDbWWOpzeQIDAQABAoIBAQCdnXsmwWr/Mol8
- QVuucwfFDVulabszWEX5uYwj1gcwDiBFDNYBMSlO2DbL47QS2ypC+UnxXj83pNF6
- kk1w/8foZ7DXjX+hypBj+03MgMDYxet3CLYxFe6XFqVnbqmN0QX9PA4P3jHgpN1y
- j5CRVie0rPnR1Ejlk2vMwsyhTwYsNFmCqW0IC5qWHPyMGiOnFTcCYUIA8IZGkbvS
- dYdVavAxwLVgmBoWVegg7eOL6hB3W1BZHxra/TEiHy0gsTSMr/l8YRqsDX8A91D1
- mOMYqaEfqtJMuAKQwJDsXFiqXjsctQyUcx8TcPgsDTB/B8OPt9g8JdcmUUnWMaa9
- mj6i9VdVAoGBAPtdkkgKGPHxq4bxWKrwzaI1gUPlPikxA+rVeplNvF0pzt4yUHD6
- l56ash7Yl6E3n1s2mLh4IMYw8a5QZffHf51cQBtHkg72u4dXVUWLUGIzwsIkx6Zu
- kmIfWCkC3uMbcE7o1pQSGgc3nUUeD4K6KBqUWxinEzCIYu3a909MANL/AoGBANYP
- 2/troZi9SfYkxF/ZdCeufDsSj4DZICzG7wbKMUJ1jDpegPBT4D8d1Nip7tj10q/P
- 4HdRrv0p5ZsFgtua9Una+ICpjpkEqjGgHUBBD7o/+Jjd8cmE57DSNSMUTVY8mwi3
- OuKNI5itlU/8gwrAtEjV5qcgsUQSH6zZm69sx9GHAoGAb78ee/Y5Nr4YYipDH+Nj
- l3wv1k5AfzFyK2DyWtrrJYOjmrZFeqR86R6elwX1Cs/egT9ZT5DgCRvTJYpKeofv
- HLbZLQd5UDuyDK3vk7YmazHVoFeXhk7bttF2cOz8x3v4RqxOUI9nkNPRj9uYS6aU
- k5RmlyQXbNkFGfbhQRhpuWMCgYBqPFKzk3YOhJrJQvQGkbgY3XqgIpT/oEJclpoX
- 547M6eOfMDmTjDz1dgulP6jfGhjm0icXcH2E/R8LcY/BB0WG0tqTmBLCFFOW71y0
- /9UbXRY2X6fYmFIYKire7vt2uftDNxRNTTiGVFeO68XpNzwCDc470XjITKpVsWWX
- iUgdzwKBgQCs+hCR03K+6npMnANOBEi4I91pJQlbcMBK5fvX1w4VAwBIEu4Z58JM
- kBbpgmANffYjTWxMIEGvdq9yZWwKCw7pv8pu/dU63MS5ST10K+7/QaLejabCskzP
- Hjy15CfE5Iy/6aBIEz30uW/B/THd0icbGKSaFjor+sW7S2GqJvE9ow==
- -----END RSA PRIVATE KEY-----
- oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL8rDtTpG8fcQuxFnC19fllJpXa2RGJaoKClIrt+9mf9hi7J/4OIy6+LJvVrauaBCByQmMaVB4R20RPKH6UBhQwLFwlSqI8w21uzgYOqqlz3U2/79Brklca4idALgvkwDHYF4nhLBzWOcMwtdnbXOZxnArdSjkQ3KFCqXB7ZBn5LMfvpTsNCBIJTfGUj+J4EOILjDgcuPfPdffonptvfHfcNWHTZQJpYLB1TS/48Zb6/hEG1FgwC4KcSs5cZDKyDBynKpKkG9npBF2S0h4DIrlfxFUvHtO1rkE9UHyNux4X/Q3YLDGai8NudTMkwXyVFS66pfMo99d4eMNtZY6nN5
- oss_rundeck_external_datasource_enabled: 'False'
- oss_security_audit_enabled: 'True'
- oss_security_audit_os_credentials_path: ${_param:oss_openstack_credentials_path}
- oss_security_audit_password: ${_param:oss_openstack_password}
- oss_security_audit_project: ${_param:oss_openstack_project}
- oss_security_audit_project_domain_id: ${_param:oss_openstack_domain_id}
- oss_security_audit_user_domain_id: ${_param:oss_openstack_domain_id}
- oss_security_audit_username: ${_param:oss_openstack_username}
- oss_sfdc_support_enabled: 'True'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- rundeck_forward_iframe: 'False'
- rundeck_iframe_host: ${_param:openstack_proxy_address}
- rundeck_iframe_port: ${_param:haproxy_rundeck_exposed_port}
- rundeck_iframe_ssl: 'False'
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI
- salt_api_password_hash: $6$hbWiURAY$Sfzk6dzos6j1B8gFDK6WoGNDk0I2Bd2IOarWDGOflgY2sBpUJ4KTq1Uw241ri933/ROHTSuhNcodmDe13i5gS.
- salt_master_address: 172.16.49.66
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- sfdc_auth_url: asas
- sfdc_consumer_key: admin
- sfdc_consumer_secret: admin
- sfdc_environment_id: admin
- sfdc_organization_id: admin
- sfdc_password: admin
- sfdc_sandbox_enabled: 'False'
- sfdc_username: admin
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: ''
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- tenant_vlan: '2423'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
deleted file mode 100644
index 1411196..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
+++ /dev/null
@@ -1,128 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-
- cid01.ocata-cicd.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid02.ocata-cicd.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- cid03.ocata-cicd.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl01.ocata-cicd.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.ocata-cicd.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.ocata-cicd.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.ocata-cicd.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.ocata-cicd.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.ocata-cicd.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.ocata-cicd.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.ocata-cicd.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.ocata-cicd.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.ocata-cicd.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.ocata-cicd.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
deleted file mode 100644
index fa2d723..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ /dev/null
@@ -1,134 +0,0 @@
-{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
-
-{% set NFV_ENABLED = os_env('NFV_ENABLED','false') %}
-
-{%- if NFV_ENABLED == 'true' %}
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-initial-cicd-nfv.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory-nfv.yaml'] %}
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-ocata-cicd-nfv') %}
-{%- else %}
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-initial-cicd.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-ocata-cicd') %}
-{%- endif %}
-
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-- description: 'Workaround for typo in salt.minion.service (https://gerrit.mcp.mirantis.com/#/c/14806/)'
- cmd: |
- git clone https://gerrit.mcp.mirantis.com/salt-formulas/salt /tmp/salt-formula-salt;
- pushd /tmp/salt-formula-salt;
- git fetch https://gerrit.mcp.mirantis.com/salt-formulas/salt refs/changes/06/14806/1 && git checkout FETCH_HEAD;
- popd;
- cp /tmp/salt-formula-salt/salt/minion/service.sls /usr/share/salt-formulas/env/salt/minion/service.sls;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Workaround for PROD-16973"
- cmd: |
- set -e;
- # Remove obsolete logging section key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.nova.controller.logging /srv/salt/reclass/classes/system/nova/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: WR for mounting 1G hugepages before linux.state
- cmd: |
- salt 'cmp*' state.sls linux.system.hugepages;
- salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
- salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: WR for correct acces to git repo from jenkins on cfg01 node
- cmd: |
- git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
- git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
- chown -R git:www-data /home/repo/mk/mk-pipelines/*;
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml
deleted file mode 100644
index a594a53..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 3839f93..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
deleted file mode 100644
index 1837e32..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
- auto {interface_name}
- iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
deleted file mode 100644
index dad3faf..0000000
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
+++ /dev/null
@@ -1,535 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-ocata-cicd-pipeline') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-# {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.75') %}
-# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.76') %}
-
-{% import 'cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'ocata-cicd_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- # virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
- virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- #ip_ranges:
- # dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
-
- groups:
-
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- #ens4:
- # networks:
- # - private
-
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
-
- # - name: {{ HOSTNAME_CFG01 }}
- # role: salt_master
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_cfg01
-
- # interfaces:
- # - label: enp3s0f0 # Infra interface
- # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- # - label: enp3s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- # network_config:
- # enp3s0f0:
- # networks:
- # - infra
- # enp3s0f1:
- # networks:
- # - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- # - label: eno1
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- # - label: eno2
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- # eno1:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
-
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: enp5s0f2
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp5s0f0
- - enp5s0f1
-
-
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- # - label: eno1
- - label: enp2s0f0
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- # - label: eth0
- - label: enp2s0f1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- # - label: eth3
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- # - label: eth2
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # - label: eth4
- # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
- # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
- network_config:
- enp2s0f1:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp5s0f0
- - enp5s0f1
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp9s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
- - label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-
- network_config:
- enp9s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp9s0f1
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/lab04-physical-inventory.yaml
deleted file mode 100644
index 69fa20e..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-nodes:
- cfg01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- - features_runtest_cfg
- interfaces:
- ens3:
- role: single_static_mgm
- ens4:
- role: single_static_ctl
- # Physical nodes
-
- kvm01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_dhcp
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
- ens11f0:
- role: single_mgm_manual
-
- kvm02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_dhcp
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
- ens11f0:
- role: single_mgm_manual
-
- kvm03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp9s0f0:
- role: single_dhcp
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
- ens11f0:
- role: single_mgm_manual
-
- osd<<count>>:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_dhcp
- enp2s0f1:
- role: single_vlan_ctl
-# role: bond0_ab_vlan_ceph_storage_backend
-
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- enp2s0f1:
- role: single_dhcp
- enp5s0f0:
- role: bond0_ab_contrail
- enp5s0f1:
- role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 43b6d4e..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,445 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-cicd-bm-ocata-contrail-maas.local
- cluster_name: cookied-cicd-bm-ocata-contrail-maas
- opencontrail_version: 4.1
- linux_repo_contrail_component: oc41
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- tenant_vlan: '2423'
- backend_vlan: '2424'
- storage_vlan: '2425' # not implemented yet, placeholder
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 172.18.176.6
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_enabled: 'True'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-0
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- maas_machines: |
- kvm01: # cz7341-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:6c:83:56"
- interfaces:
- enp9s0f0:
- mac: "0c:c4:7a:6c:83:56"
- mode: "static"
- ip: "172.16.49.67"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.117"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm02: # #cz7342-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:84:2c"
- interfaces:
- enp9s0f0:
- mac: "0c:c4:7a:6c:84:2c"
- mode: "static"
- ip: "172.16.49.68"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.118"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm03: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:83:54"
- interfaces:
- enp9s0f0:
- mac: "0c:c4:7a:6c:83:54"
- mode: "static"
- ip: "172.16.49.69"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.119"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd001: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:d4"
- interfaces:
- enp2s0f0:
- mac: "0c:c4:7a:55:6a:d4"
- mode: "static"
- ip: "172.16.49.70"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.243"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd002: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:56"
- interfaces:
- enp2s0f0:
- mac: "0c:c4:7a:55:6a:56"
- mode: "static"
- ip: "172.16.49.71"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.244"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd003: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:2a"
- interfaces:
- enp2s0f0:
- mac: "0c:c4:7a:55:6a:2a"
- mode: "static"
- ip: "172.16.49.72"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.245"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp001: # #cz7345-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a2:5f"
- interfaces:
- enp2s0f1:
- mac: "0c:c4:7a:54:a2:5f"
- mode: "static"
- ip: "172.16.49.73"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.233"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp002: # cz7346-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a0:51"
- interfaces:
- enp2s0f1:
- mac: "0c:c4:7a:54:a0:51"
- mode: "static"
- ip: "172.16.49.74"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.232"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
-
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
- openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
- openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
- openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 10.167.8.15
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: prometheus
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.10.253
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.10.0/24
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-cicd-bm-ocata-contrail-maas.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- ceph_enabled: 'True'
- ceph_version: "luminous"
- ceph_hyper_converged: "False"
- ceph_osd_backend: "bluestore"
- ceph_osd_count: "3"
- ceph_osd_node_count: 3
- ceph_osd_block_db_size: 20
- ceph_osd_journal_size: 20
- ceph_osd_bond_mode: "active-backup"
- ceph_osd_data_partition_prefix: ""
-
- ceph_public_network_allocation: storage
- ceph_public_network: "10.167.8.0/24"
- ceph_cluster_network: "10.167.8.0/24"
-
-# for 2018.11.0+
- ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
- ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
- ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
- ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
-
- ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
- ceph_osd_mode: "separated"
- ceph_osd_deploy_nic: "eth0"
- ceph_osd_primary_first_nic: "eth1"
- ceph_osd_primary_second_nic: "eth2"
- #ceph_mon_node01_address: "172.16.47.66"
- #ceph_mon_node01_deploy_address: "172.16.48.66"
- ceph_mon_node01_address: "10.167.8.66"
- ceph_mon_node01_hostname: "cmn01"
- #ceph_mon_node02_address: "172.16.47.67"
- #ceph_mon_node02_deploy_address: "172.16.48.67"
- ceph_mon_node02_address: "10.167.8.67"
- ceph_mon_node02_hostname: "cmn02"
- #ceph_mon_node03_address: "172.16.47.68"
- #ceph_mon_node03_deploy_address: "172.16.48.68"
- ceph_mon_node03_address: "10.167.8.68"
- ceph_mon_node03_hostname: "cmn03"
- #ceph_rgw_address: "172.16.47.75"
- ceph_rgw_address: "10.167.8.75"
- #ceph_rgw_node01_address: "172.16.47.76"
- #ceph_rgw_node01_deploy_address: "172.16.48.76"
- ceph_rgw_node01_address: "10.167.8.76"
- ceph_rgw_node01_hostname: "rgw01"
- #ceph_rgw_node02_address: "172.16.47.77"
- #ceph_rgw_node02_deploy_address: "172.16.48.77"
- ceph_rgw_node02_address: "10.167.8.77"
- ceph_rgw_node02_hostname: "rgw02"
- #ceph_rgw_node03_address: "172.16.47.78"
- #ceph_rgw_node03_deploy_address: "172.16.48.78"
- ceph_rgw_node03_address: "10.167.8.78"
- ceph_rgw_node03_hostname: "rgw03"
- manila_enabled: 'False'
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
- # SSL settings
- nova_vnc_tls_enabled: 'True'
- galera_ssl_enabled: 'True'
- openstack_mysql_x509_enabled: 'True'
- rabbitmq_ssl_enabled: 'True'
- openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-environment.yaml
deleted file mode 100644
index f150236..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,388 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
- cid01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cmn01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: ceph_mon_node01
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cmn02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: ceph_mon_node02
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cmn03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: ceph_mon_node03
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- rgw01.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: ceph_rgw_node01
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- rgw02.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: ceph_rgw_node02
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- rgw03.cookied-cicd-bm-ocata-contrail-maas.local:
- reclass_storage_name: ceph_rgw_node03
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt.yaml
deleted file mode 100644
index 23b0b54..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/salt.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-{% from 'cookied-cicd-bm-ocata-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-bm-ocata-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-bm-ocata-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait for salt-master is ready after configdrive user-data
- cmd: |
- timeout 120 salt-call test.ping
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Generate a public key for machines in MAAS
- cmd: |
- ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
- maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run comissioning of BM nodes
- cmd: |
- salt-call maas.process_machines
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Wait for machines ready
- cmd: |
- salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 7, delay: 5}
- skip_fail: false
-
-- description: Enforce the interfaces configuration defined in the model for servers
- cmd: |
- salt-call state.sls maas.machines.assign_ip;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
- cmd: |
- salt-key -y -D;
- salt-call test.ping
- sleep 5
- # Check that the cfg01 is registered
- salt-key | grep cfg01
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: provision the automatically commissioned physical nodes through MAAS
- cmd: |
- salt-call maas.deploy_machines;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Wait for machines deployed
- cmd: |
- salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 6, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/underlay.yaml
deleted file mode 100644
index 44a486d..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-ocata-contrail-maas/underlay.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-ocata-contrail-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-ocata-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: '172.16.49.65'
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- ip_ranges:
- dhcp: [+2, -3]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- ip_ranges:
- dhcp: [+2, -3]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- ip_ranges:
- dhcp: [+2, -3]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
- ip_ranges:
- dhcp: [+2, -3]
-
- groups:
-
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: false
- forward:
- mode: bridge
- parent_iface:
- phys_dev: !os_env LAB_MANAGEMENT_IFACE
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: bridge
- parent_iface:
- phys_dev: !os_env LAB_CONTROL_IFACE
-
- group_volumes:
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/lab04-physical-inventory.yaml
deleted file mode 100644
index 01e14b1..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-nodes:
- cfg01.cookied-cicd-bm-os-contrail32-maas.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- - features_runtest_cfg
- interfaces:
- ens3:
- role: single_static_mgm
- ens4:
- role: single_static_ctl
- # Physical nodes
-
- kvm01.cookied-cicd-bm-os-contrail32-maas.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
- role: single_mgm_manual
-
- kvm02.cookied-cicd-bm-os-contrail32-maas.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
- role: single_mgm_manual
-
- kvm03.cookied-cicd-bm-os-contrail32-maas.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
- role: single_mgm_manual
-
- osd<<count>>:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: single_vlan_ctl
-# role: bond0_ab_vlan_ceph_storage_backend
-
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- #one1: unused
- one2:
- role: single_dhcp
- ten1:
- role: bond0_ab_contrail
- ten2:
- role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 5f626d2..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,452 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-cicd-bm-os-contrail32-maas-2018.8.0.local
- cluster_name: cookied-cicd-bm-os-contrail32-maas-2018.8.0
- opencontrail_version: 3.2
- linux_repo_contrail_component: oc32
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- tenant_vlan: '2423'
- backend_vlan: '2424'
- storage_vlan: '2425' # not implemented yet, placeholder
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 172.18.176.6
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_enabled: 'True'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-0
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- maas_machines: |
- kvm01: # cz7341-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:6c:83:56"
- interfaces:
- one1:
- mac: "0c:c4:7a:6c:83:56"
- mode: "static"
- ip: "172.16.49.67"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.117"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm02: # #cz7342-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:84:2c"
- interfaces:
- one1:
- mac: "0c:c4:7a:6c:84:2c"
- mode: "static"
- ip: "172.16.49.68"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.118"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm03: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:83:54"
- interfaces:
- one1:
- mac: "0c:c4:7a:6c:83:54"
- mode: "static"
- ip: "172.16.49.69"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.119"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd001: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:d4"
- interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:d4"
- mode: "static"
- ip: "172.16.49.70"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.243"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd002: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:57"
- interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:57"
- mode: "static"
- ip: "172.16.49.71"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.244"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd003: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:2a"
- interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:2a"
- mode: "static"
- ip: "172.16.49.72"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.245"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp001: # #cz7345-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a2:5f"
- interfaces:
- one2:
- mac: "0c:c4:7a:54:a2:5f"
- mode: "static"
- ip: "172.16.49.73"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.233"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp002: # cz7346-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a0:51"
- interfaces:
- one2:
- mac: "0c:c4:7a:54:a0:51"
- mode: "static"
- ip: "172.16.49.74"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.232"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
-
- mcp_common_scripts_branch: ''
- mcp_version: 2018.8.0
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
- openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
- openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
- openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 10.167.8.15
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: prometheus
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-cicd-bm-os-contrail32-maas-2018.8.0.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- ceph_enabled: 'True'
- ceph_version: "luminous"
- ceph_hyper_converged: "False"
- ceph_osd_backend: "bluestore"
- ceph_osd_count: "3"
- ceph_osd_node_count: 3
- ceph_osd_block_db_size: 20
- ceph_osd_journal_size: 20
- ceph_osd_bond_mode: "active-backup"
- ceph_osd_data_partition_prefix: ""
-
- ceph_public_network_allocation: storage
- ceph_public_network: "10.167.8.0/24"
- ceph_cluster_network: "10.167.8.0/24"
-
- # For compatibility with 2018.8.0
- ceph_osd_rack01_deploy_subnet: '172.16.49'
- ceph_osd_rack01_single_subnet: '10.167.8'
- ceph_osd_rack01_backend_subnet: '10.167.8'
-
-# for 2018.11.0+
-# ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
-# ceph_osd_storage_address_ranges: "10.167.47.200-10.167.47.202"
-# ceph_osd_backend_address_ranges: "10.167.49.200-10.167.49.202"
-
- ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
- ceph_osd_mode: "separated"
- ceph_osd_deploy_nic: "eth0"
- ceph_osd_primary_first_nic: "eth1"
- ceph_osd_primary_second_nic: "eth2"
- #ceph_mon_node01_address: "172.16.47.66"
- #ceph_mon_node01_deploy_address: "172.16.48.66"
- ceph_mon_node01_address: "10.167.8.66"
- ceph_mon_node01_hostname: "cmn01"
- #ceph_mon_node02_address: "172.16.47.67"
- #ceph_mon_node02_deploy_address: "172.16.48.67"
- ceph_mon_node02_address: "10.167.8.67"
- ceph_mon_node02_hostname: "cmn02"
- #ceph_mon_node03_address: "172.16.47.68"
- #ceph_mon_node03_deploy_address: "172.16.48.68"
- ceph_mon_node03_address: "10.167.8.68"
- ceph_mon_node03_hostname: "cmn03"
- #ceph_rgw_address: "172.16.47.75"
- ceph_rgw_address: "10.167.8.75"
- #ceph_rgw_node01_address: "172.16.47.76"
- #ceph_rgw_node01_deploy_address: "172.16.48.76"
- ceph_rgw_node01_address: "10.167.8.76"
- ceph_rgw_node01_hostname: "rgw01"
- #ceph_rgw_node02_address: "172.16.47.77"
- #ceph_rgw_node02_deploy_address: "172.16.48.77"
- ceph_rgw_node02_address: "10.167.8.77"
- ceph_rgw_node02_hostname: "rgw02"
- #ceph_rgw_node03_address: "172.16.47.78"
- #ceph_rgw_node03_deploy_address: "172.16.48.78"
- ceph_rgw_node03_address: "10.167.8.78"
- ceph_rgw_node03_hostname: "rgw03"
- manila_enabled: 'False'
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-environment.yaml
deleted file mode 100644
index 2f5b431..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt-context-environment.yaml
+++ /dev/null
@@ -1,395 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
- cid01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader_2018_8_0
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database_2018_8_0
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database_2018_8_0
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_dhcp
- eth2:
- role: single_ctl
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_dhcp
- eth2:
- role: single_ctl
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_dhcp
- eth2:
- role: single_ctl
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_dhcp
- eth2:
- role: single_ctl
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_dhcp
- eth2:
- role: single_ctl
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_trusty
- interfaces:
- eth1:
- role: single_dhcp
- eth2:
- role: single_ctl
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cmn01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: ceph_mon_node01
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cmn02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: ceph_mon_node02
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cmn03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: ceph_mon_node03
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- rgw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: ceph_rgw_node01
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- rgw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: ceph_rgw_node02
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- rgw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: ceph_rgw_node03
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt.yaml
deleted file mode 100644
index b98e317..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/salt.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-{% from 'cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait for salt-master is ready after configdrive user-data
- cmd: |
- timeout 120 salt-call test.ping
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Prepare maas
- cmd: |
- salt-call state.sls maas.cluster;
- salt-call state.sls maas.region;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Generate a public key for machines in MAAS
- cmd: |
- ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
- maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run comissioning of BM nodes
- cmd: |
- salt-call maas.process_machines
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Wait for machines ready
- cmd: |
- salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 7, delay: 5}
- skip_fail: false
-
-- description: Enforce the interfaces configuration defined in the model for servers
- cmd: |
- salt-call state.sls maas.machines.assign_ip;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
- cmd: |
- salt-key -y -D;
- salt-call test.ping
- sleep 5
- # Check that the cfg01 is registered
- salt-key | grep cfg01
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: provision the automatically commissioned physical nodes through MAAS
- cmd: |
- salt-call maas.deploy_machines;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Wait for machines deployed
- cmd: |
- salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 6, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml
deleted file mode 100644
index 0cf698d..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail32-maas-2018.8.0/underlay.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-os-contrail32-maas-2018.8.0') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-os-contrail32-maas-2018.8.0_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: '172.16.49.65'
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- ip_ranges:
- dhcp: [+2, -3]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- ip_ranges:
- dhcp: [+2, -3]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- ip_ranges:
- dhcp: [+2, -3]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
- ip_ranges:
- dhcp: [+2, -3]
-
- groups:
-
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: false
- forward:
- mode: bridge
- parent_iface:
- phys_dev: !os_env LAB_MANAGEMENT_IFACE
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: bridge
- parent_iface:
- phys_dev: !os_env LAB_CONTROL_IFACE
-
- group_volumes:
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/lab04-physical-inventory.yaml
deleted file mode 100644
index e1b92fb..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-nodes:
- cfg01.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- - features_runtest_cfg
- interfaces:
- ens3:
- role: single_static_mgm
- ens4:
- role: single_static_ctl
- # Physical nodes
-
- kvm01.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
- role: single_mgm_manual
-
- kvm02.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
- role: single_mgm_manual
-
- kvm03.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
- role: single_mgm_manual
-
- osd<<count>>:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- one1:
- role: single_dhcp
- one2:
- role: single_vlan_ctl
-# role: bond0_ab_vlan_ceph_storage_backend
-
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- #one1: unused
- one2:
- role: single_dhcp
- ten1:
- role: bond0_ab_contrail
- ten2:
- role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 3256d10..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,452 +0,0 @@
-default_context:
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
- +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
- qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
- m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
- 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
- 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
- HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
- AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
- o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
- 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
- XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
- AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
- USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
- uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
- QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
- 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
- r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
- qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
- CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
- p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
- 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
- NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
- CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
- XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
- N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
- bmk_enabled: 'False'
- cicd_control_node01_address: 10.167.8.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.8.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.8.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.8.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
- oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
- IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
- kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
- wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
- 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
- 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
- lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
- k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
- 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
- dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
- 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
- qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
- BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
- UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
- VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
- 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
- nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
- Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
- FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
- HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
- Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
- poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
- 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
- l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
- cluster_domain: cookied-bm-4.0-contrail.local
- cluster_name: cookied-bm-4.0-contrail
- opencontrail_version: 4.0
- linux_repo_contrail_component: oc40
- compute_bond_mode: active-backup
- compute_padding_with_zeros: 'True'
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.8.0/24
- control_vlan: '2422'
- tenant_vlan: '2423'
- backend_vlan: '2424'
- storage_vlan: '2425' # not implemented yet, placeholder
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.65
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.64/26
- deployment_type: physical
- dns_server01: 172.18.208.44
- dns_server02: 172.18.176.6
- email_address: sgudz@mirantis.com
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.8.241
- infra_kvm01_deploy_address: 172.16.49.67
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.8.242
- infra_kvm02_deploy_address: 172.16.49.68
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.8.243
- infra_kvm03_deploy_address: 172.16.49.69
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.8.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_enabled: 'True'
- maas_deploy_address: 172.16.49.66
- maas_deploy_cidr: 172.16.49.64/26
- maas_deploy_gateway: 172.16.49.65
- maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
- maas_deploy_vlan: '0'
- maas_dhcp_enabled: 'True'
- maas_fabric_name: fabric-0
- maas_hostname: cfg01
- maas_manage_deploy_network: 'True'
- maas_machines: |
- kvm01: # cz7341-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:6c:83:56"
- interfaces:
- one1:
- mac: "0c:c4:7a:6c:83:56"
- mode: "static"
- ip: "172.16.49.67"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.117"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm02: # #cz7342-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:84:2c"
- interfaces:
- one1:
- mac: "0c:c4:7a:6c:84:2c"
- mode: "static"
- ip: "172.16.49.68"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.118"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm03: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:83:54"
- interfaces:
- one1:
- mac: "0c:c4:7a:6c:83:54"
- mode: "static"
- ip: "172.16.49.69"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "5.43.225.119"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd001: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:d4"
- interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:d4"
- mode: "static"
- ip: "172.16.49.70"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.243"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd002: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:57"
- interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:57"
- mode: "static"
- ip: "172.16.49.71"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.244"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- osd003: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:2a"
- interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:2a"
- mode: "static"
- ip: "172.16.49.72"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.245"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp001: # #cz7345-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a2:5f"
- interfaces:
- one2:
- mac: "0c:c4:7a:54:a2:5f"
- mode: "static"
- ip: "172.16.49.73"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.233"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp002: # cz7346-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a0:51"
- interfaces:
- one2:
- mac: "0c:c4:7a:54:a0:51"
- mode: "static"
- ip: "172.16.49.74"
- subnet: "172.16.49.64/26" # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- power_parameters:
- power_address: "185.8.59.232"
- #power_pass: ==IPMI_PASS==
- power_password: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
-
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
- opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
- opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
- opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
- opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
- opencontrail_enabled: 'True'
- opencontrail_router01_address: 10.167.8.220
- opencontrail_router01_hostname: rtr01
- opencontrail_router02_address: 10.167.8.101
- opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.8.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.8
- openstack_compute_rack01_tenant_subnet: 192.168.0
- openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
- openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
- openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
- openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
- openstack_control_address: 10.167.8.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.8.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.8.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.8.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.8.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.8.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.8.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.8.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_message_queue_address: 10.167.8.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.8.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.8.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.8.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: opencontrail
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_proxy_address: 10.167.8.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.8.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.8.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.8.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
- salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
- salt_master_address: 10.167.8.15
- salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.66
- shared_reclass_branch: ''
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
- stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: prometheus
- stacklight_monitor_address: 10.167.8.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.8.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 192.168.0.220
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 192.168.0.0/24
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- openldap_domain: cookied-bm-4.0-contrail.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- ceph_enabled: 'True'
- ceph_version: "luminous"
- ceph_hyper_converged: "False"
- ceph_osd_backend: "bluestore"
- ceph_osd_count: "3"
- ceph_osd_node_count: 3
- ceph_osd_block_db_size: 20
- ceph_osd_journal_size: 20
- ceph_osd_bond_mode: "active-backup"
- ceph_osd_data_partition_prefix: ""
-
- ceph_public_network_allocation: storage
- ceph_public_network: "10.167.8.0/24"
- ceph_cluster_network: "10.167.8.0/24"
-
- # For compatibility with 2018.8.0
- ceph_osd_rack01_deploy_subnet: '172.16.49'
- ceph_osd_rack01_single_subnet: '10.167.8'
- ceph_osd_rack01_backend_subnet: '10.167.8'
-
-# for 2018.11.0+
-# ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
-# ceph_osd_storage_address_ranges: "10.167.47.200-10.167.47.202"
-# ceph_osd_backend_address_ranges: "10.167.49.200-10.167.49.202"
-
- ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
- ceph_osd_mode: "separated"
- ceph_osd_deploy_nic: "eth0"
- ceph_osd_primary_first_nic: "eth1"
- ceph_osd_primary_second_nic: "eth2"
- #ceph_mon_node01_address: "172.16.47.66"
- #ceph_mon_node01_deploy_address: "172.16.48.66"
- ceph_mon_node01_address: "10.167.8.66"
- ceph_mon_node01_hostname: "cmn01"
- #ceph_mon_node02_address: "172.16.47.67"
- #ceph_mon_node02_deploy_address: "172.16.48.67"
- ceph_mon_node02_address: "10.167.8.67"
- ceph_mon_node02_hostname: "cmn02"
- #ceph_mon_node03_address: "172.16.47.68"
- #ceph_mon_node03_deploy_address: "172.16.48.68"
- ceph_mon_node03_address: "10.167.8.68"
- ceph_mon_node03_hostname: "cmn03"
- #ceph_rgw_address: "172.16.47.75"
- ceph_rgw_address: "10.167.8.75"
- #ceph_rgw_node01_address: "172.16.47.76"
- #ceph_rgw_node01_deploy_address: "172.16.48.76"
- ceph_rgw_node01_address: "10.167.8.76"
- ceph_rgw_node01_hostname: "rgw01"
- #ceph_rgw_node02_address: "172.16.47.77"
- #ceph_rgw_node02_deploy_address: "172.16.48.77"
- ceph_rgw_node02_address: "10.167.8.77"
- ceph_rgw_node02_hostname: "rgw02"
- #ceph_rgw_node03_address: "172.16.47.78"
- #ceph_rgw_node03_deploy_address: "172.16.48.78"
- ceph_rgw_node03_address: "10.167.8.78"
- ceph_rgw_node03_hostname: "rgw03"
- manila_enabled: 'False'
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt.yaml
deleted file mode 100644
index da93592..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-{% from 'cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait for salt-master is ready after configdrive user-data
- cmd: |
- timeout 120 salt-call test.ping
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Prepare maas
- cmd: |
- salt-call state.sls maas.cluster;
- salt-call state.sls maas.region;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Generate a public key for machines in MAAS
- cmd: |
- ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
- maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run comissioning of BM nodes
- cmd: |
- salt-call maas.process_machines
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Wait for machines ready
- cmd: |
- salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 7, delay: 5}
- skip_fail: false
-
-- description: Enforce the interfaces configuration defined in the model for servers
- cmd: |
- salt-call state.sls maas.machines.assign_ip;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
- cmd: |
- salt-key -y -D;
- salt-call test.ping
- sleep 5
- # Check that the cfg01 is registered
- salt-key | grep cfg01
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: provision the automatically commissioned physical nodes through MAAS
- cmd: |
- salt-call maas.deploy_machines;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Wait for machines deployed
- cmd: |
- salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 6, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml
deleted file mode 100644
index ac301bd..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/underlay.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-os-contrail40-maas-2018.8.0') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-os-contrail40-maas-2018.8.0_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
- params:
- ip_reserved:
- gateway: '172.16.49.65'
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- ip_ranges:
- dhcp: [+2, -3]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- ip_ranges:
- dhcp: [+2, -3]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- ip_ranges:
- dhcp: [+2, -3]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
- ip_ranges:
- dhcp: [+2, -3]
-
- groups:
-
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: false
- forward:
- mode: bridge
- parent_iface:
- phys_dev: !os_env LAB_MANAGEMENT_IFACE
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: bridge
- parent_iface:
- phys_dev: !os_env LAB_CONTROL_IFACE
-
- group_volumes:
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
index 27b5d25..e2788ce 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
@@ -18,12 +18,12 @@
- infra_kvm
- linux_system_codename_xenial
interfaces:
- enp9s0f0:
- role: single_dhcp
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
- ens11f0:
- role: single_mgm_manual
+ enp8s0f0:
+ role: single_mgm_dhcp
+ ens4f1:
+ role: bond_ctl_contrail_lacp
+ ens11f1:
+ role: bond_ctl_contrail_lacp
kvm02.cookied-cicd-bm-os-contrail40-maas.local:
reclass_storage_name: infra_kvm_node02
@@ -31,12 +31,12 @@
- infra_kvm
- linux_system_codename_xenial
interfaces:
- enp9s0f0:
- role: single_dhcp
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
- ens11f0:
- role: single_mgm_manual
+ enp8s0f0:
+ role: single_mgm_dhcp
+ ens4f1:
+ role: bond_ctl_contrail_lacp
+ ens11f1:
+ role: bond_ctl_contrail_lacp
kvm03.cookied-cicd-bm-os-contrail40-maas.local:
reclass_storage_name: infra_kvm_node03
@@ -44,12 +44,54 @@
- infra_kvm
- linux_system_codename_xenial
interfaces:
- enp9s0f0:
- role: single_dhcp
- enp9s0f1:
- role: bond0_ab_ovs_vlan_ctl
- ens11f0:
- role: single_mgm_manual
+ enp8s0f0:
+ role: single_mgm_dhcp
+ ens4f1:
+ role: bond_ctl_contrail_lacp
+ ens11f1:
+ role: bond_ctl_contrail_lacp
+
+ kvm04.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node04
+ roles:
+ - infra_kvm_wo_gluster
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ enp8s0f0:
+ role: single_mgm_dhcp
+ ens4f1:
+ role: bond_ctl_contrail_lacp
+ ens11f1:
+ role: bond_ctl_contrail_lacp
+
+ kvm05.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node05
+ roles:
+ - infra_kvm_wo_gluster
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ enp8s0f0:
+ role: single_mgm_dhcp
+ ens4f1:
+ role: bond_ctl_contrail_lacp
+ ens11f1:
+ role: bond_ctl_contrail_lacp
+
+ kvm06.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node06
+ roles:
+ - infra_kvm_wo_gluster
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ enp8s0f0:
+ role: single_mgm_dhcp
+ ens4f1:
+ role: bond_ctl_contrail_lacp
+ ens11f1:
+ role: bond_ctl_contrail_lacp
osd<<count>>:
reclass_storage_name: ceph_osd_rack01
@@ -57,11 +99,14 @@
- ceph_osd
- linux_system_codename_xenial
interfaces:
- enp2s0f0:
+ eno1:
role: single_dhcp
- enp2s0f1:
- role: single_vlan_ctl
+ ens1f1:
+ role: bond_ctl_contrail_lacp
+ ens2f1:
+ role: bond_ctl_contrail_lacp
# role: bond0_ab_vlan_ceph_storage_backend
+# todo: add storage net for ceph to second lacp bond
cmp<<count>>:
reclass_storage_name: openstack_compute_rack01
@@ -69,9 +114,13 @@
- openstack_compute
- linux_system_codename_xenial
interfaces:
- enp2s0f1:
+ eno1:
role: single_dhcp
- enp5s0f0:
- role: bond0_ab_contrail
- enp5s0f1:
- role: single_vlan_ctl
+ ens1f0:
+ role: bond_ctl_contrail_lacp
+ ens1f1:
+ role: bond_contrail_lacp
+ ens2f0:
+ role: bond_ctl_contrail_lacp
+ ens2f1:
+ role: bond_contrail_lacp
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
index 1a50ff3..5116cd7 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
@@ -103,6 +103,15 @@
infra_kvm03_control_address: 10.167.8.243
infra_kvm03_deploy_address: 172.16.49.69
infra_kvm03_hostname: kvm03
+ infra_kvm04_control_address: 10.167.8.244
+ infra_kvm04_deploy_address: 172.16.49.70
+ infra_kvm04_hostname: kvm04
+ infra_kvm05_control_address: 10.167.8.245
+ infra_kvm05_deploy_address: 172.16.49.71
+ infra_kvm05_hostname: kvm05
+ infra_kvm06_control_address: 10.167.8.246
+ infra_kvm06_deploy_address: 172.16.49.72
+ infra_kvm06_hostname: kvm06
infra_kvm_vip_address: 10.167.8.240
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
@@ -116,139 +125,188 @@
maas_deploy_cidr: 172.16.49.64/26
maas_deploy_gateway: 172.16.49.65
maas_deploy_range_end: 172.16.49.119
- maas_deploy_range_start: 172.16.49.77
+ maas_deploy_range_start: 172.16.49.78
maas_deploy_vlan: '0'
maas_dhcp_enabled: 'True'
maas_fabric_name: fabric-0
maas_hostname: cfg01
maas_manage_deploy_network: 'True'
maas_machines: |
- kvm01: # cz7341-kvm.host-telecom.com
+ kvm01: # cz8062-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
# pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:6c:83:56"
+ pxe_interface_mac: "0c:c4:7a:a8:d3:44"
interfaces:
- enp9s0f0:
- mac: "0c:c4:7a:6c:83:56"
+ enp8s0f0:
+ mac: "0c:c4:7a:a8:d3:44"
mode: "static"
ip: "172.16.49.67"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "5.43.225.117"
+ power_address: "5.43.227.106"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- kvm02: # #cz7342-kvm.host-telecom.com
+ kvm02: # #cz8063-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:84:2c"
+ pxe_interface_mac: "0c:c4:7a:a8:b8:18"
interfaces:
- enp9s0f0:
- mac: "0c:c4:7a:6c:84:2c"
+ enp8s0f0:
+ mac: "0c:c4:7a:a8:b8:18"
mode: "static"
ip: "172.16.49.68"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "5.43.225.118"
+ power_address: "5.43.227.107"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- kvm03: # #cz7343-kvm.host-telecom.com
+ kvm03: # #cz8064-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:6c:83:54"
+ pxe_interface_mac: "0c:c4:7a:a8:d0:40"
interfaces:
- enp9s0f0:
- mac: "0c:c4:7a:6c:83:54"
+ enp8s0f0:
+ mac: "0c:c4:7a:a8:d0:40"
mode: "static"
ip: "172.16.49.69"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "5.43.225.119"
+ power_address: "5.43.227.108"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- osd001: # #cz7343-kvm.host-telecom.com
+ kvm04: # cz8065-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:d4"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:a8:b8:22"
interfaces:
- enp2s0f0:
- mac: "0c:c4:7a:55:6a:d4"
+ enp8s0f0:
+ mac: "0c:c4:7a:a8:b8:22"
mode: "static"
ip: "172.16.49.70"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "185.8.59.243"
+ power_address: "5.43.227.110"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- osd002: # #cz7343-kvm.host-telecom.com
+ kvm05: # #cz8066-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:56"
+ pxe_interface_mac: "0c:c4:7a:a8:b8:1a"
interfaces:
- enp2s0f0:
- mac: "0c:c4:7a:55:6a:56"
+ enp8s0f0:
+ mac: "0c:c4:7a:a8:b8:1a"
mode: "static"
ip: "172.16.49.71"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "185.8.59.244"
+ power_address: "5.43.227.111"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- osd003: # #cz7343-kvm.host-telecom.com
+ kvm06: # #cz8067-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:2a"
+ pxe_interface_mac: "0c:c4:7a:a8:b8:1c"
interfaces:
- enp2s0f0:
- mac: "0c:c4:7a:55:6a:2a"
+ enp8s0f0:
+ mac: "0c:c4:7a:a8:b8:1c"
mode: "static"
ip: "172.16.49.72"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "185.8.59.245"
+ power_address: "5.43.227.112"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- cmp001: # #cz7345-kvm.host-telecom.com
+ osd001: # #cz5272-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a2:5f"
+ pxe_interface_mac: "0c:c4:7a:aa:51:f8"
interfaces:
- enp2s0f1:
- mac: "0c:c4:7a:54:a2:5f"
+ eno1:
+ mac: "0c:c4:7a:aa:51:f8"
mode: "static"
ip: "172.16.49.73"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "185.8.59.233"
+ power_address: "5.43.225.182"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- cmp002: # cz7346-kvm.host-telecom.com
+ osd002: # #cz7857-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:54:a0:51"
+ pxe_interface_mac: "0c:c4:7a:6d:3a:80"
interfaces:
- enp2s0f1:
- mac: "0c:c4:7a:54:a0:51"
+ eno1:
+ mac: "0c:c4:7a:6d:3a:80"
mode: "static"
ip: "172.16.49.74"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
power_parameters:
- power_address: "185.8.59.232"
+ power_address: "5.43.225.199"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd003: # #cz7787-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6b:f7:7a"
+ interfaces:
+ eno1:
+ mac: "0c:c4:7a:6b:f7:7a"
+ mode: "static"
+ ip: "172.16.49.75"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.123"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # #cz7987-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:a8:72:ac"
+ interfaces:
+ eno1:
+ mac: "0c:c4:7a:a8:72:ac"
+ mode: "static"
+ ip: "172.16.49.76"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.181"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # cz7842-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6d:3a:c6"
+ interfaces:
+ eno1:
+ mac: "0c:c4:7a:6d:3a:c6"
+ mode: "static"
+ ip: "172.16.49.77"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "5.43.225.201"
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
@@ -278,7 +336,6 @@
opencontrail_router01_hostname: rtr01
opencontrail_router02_address: 10.167.8.101
opencontrail_router02_hostname: rtr02
- openldap_enabled: 'False'
openssh_groups: ''
openstack_benchmark_node01_address: 10.167.8.95
openstack_benchmark_node01_hostname: bmk01
@@ -286,7 +343,7 @@
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
- openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+ openstack_compute_deploy_address_ranges: 172.16.49.76-172.16.49.77
openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
openstack_control_address: 10.167.8.10
@@ -397,7 +454,7 @@
# for 2018.11.0+
ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
- ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
+ ceph_osd_deploy_address_ranges: "172.16.49.73-172.16.49.75"
ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
@@ -442,4 +499,6 @@
openstack_mysql_x509_enabled: 'True'
rabbitmq_ssl_enabled: 'True'
openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
\ No newline at end of file
+ openstack_internal_protocol: 'https'
+ cinder_backup_engine: 'ceph'
+ cinder_ceph_backup_pool_name: 'backups'
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
index 0a5f4f2..e48b817 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
@@ -98,7 +98,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
volumes:
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index 5665cd1..cb9a221 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -82,6 +82,7 @@
control_network_subnet: 10.167.4.0/24
control_vlan: '10'
cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
deleted file mode 100644
index b8bda7e..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-default_context:
- auditd_enabled: 'False'
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- bmk_enabled: 'False'
- calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
- calico_enable_nat: 'True'
- calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
- calico_netmask: '16'
- calico_network: 192.168.0.0
- calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEA0vs3yV4GVrnMDT1i1rc3DKVCU4kQ6D7OCDhJ10kiA4VZ/9Td
- CIKGxL3+kzNHBqEB3HcjsGJ7zTud3S97Zd21HzTeF0iia7kQXZs3zw3GnU4IWCzd
- zeyJenD1twrDILc1Q80SPIw8klbgK3T8Wcs0sACyfAORTKnM4Alrq3PSyn2G/L7s
- wL+4OYWw8+V1Bo6f/1zsVdJWFelZ1HNd1fuGP9Egx+PSHznmBQniewM2AktZVHkd
- e2j+sIMnimRr55ic6tEiPKb1sp+mdqfxUbkxU1q3r80Bnq5lbYNexJZLZFbY8pBF
- w5qtVoXsrUra0xT66yDs9z9kgTXZt8MppSPEywIDAQABAoIBAD60d3IbxdqEwga1
- Vejm7y+M9leJh0LfV7DNufSIQdm2CnektkTPNmrG5SCuvs9TjxANQMgtnQn0TAjv
- EcfGywwJVHCrH8rQZ/LKLR4WxA8AoC5Et296e6muZPkP88qHaQdLrb3dGCuOMnX4
- AQBXCsO8kU+WZ9rXJJL5ecytUdroRZnmChUN9sZFoCrrFmCJyKzSdmbtyHwvmc6t
- K9YHx5HcQppdnbaUT/PcRNjNgOdQsGrYG/4tGpZE9GYqBRrtEZAFKnRDDlaq9mvc
- FJ451YhomyUNtM7hor+VpSi9WtgiXh9fWUfJUIH4Ven9y29I7SNAxftfs0Xxdz2O
- aEYnhPECgYEA9YKsXtslfSsjyG3Z6NTy9peAjBlUSA7jkbsfOIcN1qLAxSlOMIVG
- P9knWa5rgp1nOssHxpbCJaiZv1r2C0sHZ+B/ZCNV1lCdz+pN+Y0AQHz2/VrQEtkR
- cvClpWXjACT0igJ5mrmehzV166CI3t7+nCrkRd9aYTuVpNP4o9aEuGcCgYEA2/7f
- 2UVcq8SKJYAJD0mNc4mfnNwmyvIfwJqQfechPPwXG3kmd2jgFdUV3JyCwS771TRz
- 9QAHBLoBgKSVKLBORsTjYy7TSIYvnTUgrymMwc75nevVIy258hPSYpF6cVEIRnNr
- sek5heYQxJZr9RraxihvoaZytmPRm918sR6B4f0CgYEAlizQc1VpoR76TGelm55m
- 4B/cKdZ0j39MBKCJgHJcLKZxdCjIAzYCupuCToE6kjLmKjh3ESq2p4JySXLCfjXu
- 2cOhKQfUQbweTEfuWm+9b7UBAAjErkLJQZ2iNYIVUMlKLAFHkTVpmxtAflk8X9fX
- tn8mEveEuWVRK/ndZZqapJECgYA3IQSpZsdVR/gyc4ZRrWXkCR3VahnSi6BHXLRO
- yKe8p5OGz/JCxCY7cl17HkFp9cMn53ATekFH/vC3cwbp3lyPQXGV/jr2FqJB6/lX
- y7q5KovE9j9ABIpvTmZPSxN66AqB1RSszbwbgM685NEC6Arg02s9//8JE7SIMZW4
- sONtZQKBgQDZoa9yAM9YRRFUpgiyKdmp4Yzq3xc17xhlrv3HgAKXzCwlRXm+TxKP
- kmwFI2nn3sPN9jSegCGyOtfzoh4Q7DtBowrjLPUisWk1gLZH4HpCP7mkndR9ODI3
- So+yjY6Y787NTuAtS97T1AfOPQR8VOm9vuGT1IRWNno3ckThokPcCg==
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDS+zfJXgZWucwNPWLWtzcMpUJTiRDoPs4IOEnXSSIDhVn/1N0IgobEvf6TM0cGoQHcdyOwYnvNO53dL3tl3bUfNN4XSKJruRBdmzfPDcadTghYLN3N7Il6cPW3CsMgtzVDzRI8jDySVuArdPxZyzSwALJ8A5FMqczgCWurc9LKfYb8vuzAv7g5hbDz5XUGjp//XOxV0lYV6VnUc13V+4Y/0SDH49IfOeYFCeJ7AzYCS1lUeR17aP6wgyeKZGvnmJzq0SI8pvWyn6Z2p/FRuTFTWrevzQGermVtg17ElktkVtjykEXDmq1WheytStrTFPrrIOz3P2SBNdm3wymlI8TL
- cluster_domain: cookied-cicd-k8s-calico.local
- cluster_name: cookied-cicd-k8s-calico
- context_seed: 6RD8HFGk0xksGje6RcIiGRkHIIkdvHUDgBuUsCTYDv5Tw4DmVMbRlRVnatzGHYJd
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.4.0/24
- control_vlan: '10'
- cookiecutter_template_branch: ''
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 10.167.5.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 10.167.5.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- etcd_ssl: 'True'
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kqueen_custom_mail_enabled: 'False'
- kqueen_enabled: 'False'
- kubernetes_control_address: 10.167.4.10
- kubernetes_control_node01_address: 10.167.4.11
- kubernetes_control_node01_deploy_address: 10.167.5.11
- kubernetes_control_node01_hostname: ctl01
- kubernetes_control_node02_address: 10.167.4.12
- kubernetes_control_node02_deploy_address: 10.167.5.12
- kubernetes_control_node02_hostname: ctl02
- kubernetes_control_node03_address: 10.167.4.13
- kubernetes_control_node03_deploy_address: 10.167.5.13
- kubernetes_control_node03_hostname: ctl03
- kubernetes_compute_count: 4
- kubernetes_compute_rack01_hostname: cmp
- kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
- kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
- kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
- kubernetes_enabled: 'True'
- kubernetes_externaldns_enabled: 'False'
- kubernetes_keepalived_vip_interface: br_ctl
- kubernetes_network_calico_enabled: 'True'
- kubernetes_virtlet_enabled: 'False'
- kubernetes_proxy_hostname: prx
- kubernetes_proxy_node01_hostname: prx01
- kubernetes_proxy_node02_hostname: prx02
- kubernetes_proxy_address: 10.167.4.220
- kubernetes_proxy_node01_address: 10.167.4.221
- kubernetes_proxy_node02_address: 10.167.4.222
- local_repositories: 'False'
- maas_deploy_address: 10.167.5.15
- maas_deploy_range_end: 10.167.5.199
- maas_deploy_range_start: 10.167.5.180
- maas_deploy_vlan: '0'
- maas_fabric_name: deploy-fabric0
- maas_hostname: cfg01
- mcp_common_scripts_branch: ''
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: ${_param:cluster_name}.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openssh_groups: cicd
- openstack_enabled: 'False'
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: kubernetes_enabled
- public_host: ${_param:infra_config_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: GItjqF2mBE9JpA6WjnqD4pKNJMWJK72g
- salt_api_password_hash: $6$XxwWWczf$tFbAgdW1PeVJWTn0Jw/xfwJlss/RgOf9fGWqx2XE7vZ5O/ZGR1AuIgl/HH7Qm3.ZxvutaWmfWszxWcPFZepzv.
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.6.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.6.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
deleted file mode 100644
index 94b248f..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{% from 'cookied-cicd-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
deleted file mode 100644
index 5b87397..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ /dev/null
@@ -1,630 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-
-{% import 'cookied-cicd-k8s-calico/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-calico/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_PRX01 }}: +221
- default_{{ HOSTNAME_PRX02 }}: +222
-
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- default_{{ HOSTNAME_CMP03 }}: +103
- default_{{ HOSTNAME_CMP04 }}: +104
- default_{{ HOSTNAME_LOG }}: +60
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_PRX01 }}: +221
- default_{{ HOSTNAME_PRX02 }}: +222
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_PRX01 }}: +221
- default_{{ HOSTNAME_PRX02 }}: +222
-
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
- storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
- use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preffered)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
- boot:
- - hd
- volumes:
- - name: system
- capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
- # it will be uploaded after config drive generation
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL01 }}
- role: k8s_controller
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP04 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index df80d80..8c68d15 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -24,6 +24,7 @@
control_network_subnet: 10.167.4.0/24
control_vlan: '10'
cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
deleted file mode 100644
index 01366c3..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ /dev/null
@@ -1,214 +0,0 @@
-default_context:
- auditd_enabled: 'False'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
- 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
- AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
- xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
- B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
- q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
- s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
- V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
- 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
- pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
- MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
- 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
- udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
- R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
- XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
- Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
- KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
- 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
- ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
- ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
- Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
- r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
- mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
- qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
- 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
- cluster_domain: cookied-cicd-pike-dpdk.local
- cluster_name: cookied-cicd-pike-dpdk
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.4.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 10.167.5.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 10.167.5.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 10.167.5.15
- maas_deploy_range_end: 10.167.5.199
- maas_deploy_range_start: 10.167.5.180
- maas_deploy_vlan: '0'
- maas_fabric_name: deploy-fabric0
- maas_hostname: cfg01
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: ${_param:cluster_name}.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openssh_groups: cicd
- openstack_benchmark_node01_address: 10.167.4.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
- openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
- openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
- openstack_control_address: 10.167.4.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_hugepages_count: '2048'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_cpu_pinning: '4,5,8,9,10,11'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network endpoint
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.4.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.6.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.6.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- openstack_create_public_network: 'False'
- openstack_public_neutron_subnet_gateway: 172.17.16.1
- openstack_public_neutron_subnet_cidr: 172.17.16.0/24
- openstack_public_neutron_subnet_allocation_start: 172.17.16.201
- openstack_public_neutron_subnet_allocation_end: 172.17.16.245
- manila_enabled: 'False'
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
deleted file mode 100644
index 26f7983..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
+++ /dev/null
@@ -1,237 +0,0 @@
-nodes:
- cfg01:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- - features_runtest
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_static_ctl
-
- kvm01:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kvm02:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kvm03:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cid01:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cid02:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cid03:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs01:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs02:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs03:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg01:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg02:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg03:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- # Generator-based computes. For compatibility only
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- dpdkport0:
- role: bond2_dpdk_prv
- dpdk_pci: "0000:00:05.0"
- dpdkport1:
- role: bond2_dpdk_prv
- dpdk_pci: "0000:00:06.0"
- ens7:
- role: bond1_ab_ovs_floating
-
- gtw01:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh_no_tag
- ens6:
- role: bond0_ab_ovs_vxlan_mesh_no_tag
- ens7:
- role: single_ovs_br_floating
- external_address: 10.90.0.110
- external_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
deleted file mode 100644
index 64abc07..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-- description: Enable hugepages on cmp nodes
- cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
deleted file mode 100644
index ba916b3..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
+++ /dev/null
@@ -1,817 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-cicd-pike-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: True
- use_hugepages: True
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
- boot:
- - hd
- volumes:
- - name: system
- capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
- # it will be uploaded after config drive generation
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: e1000
- - label: ens6
- l2_network_device: tenant
- interface_model: e1000
- - label: ens7
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - tenant
- ens7:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 2c94f11..e6ed5eb 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -117,6 +117,7 @@
control_network_subnet: 10.167.4.0/24
control_vlan: '10'
cookiecutter_template_branch: 'proposed'
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
deleted file mode 100644
index be70caa..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ /dev/null
@@ -1,242 +0,0 @@
-default_context:
- auditd_enabled: 'False'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
- 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
- AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
- xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
- B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
- q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
- s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
- V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
- 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
- pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
- MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
- 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
- udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
- R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
- XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
- Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
- KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
- 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
- ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
- ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
- Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
- r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
- mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
- qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
- 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
- cluster_domain: cookied-cicd-pike-dvr-sl.local
- cluster_name: cookied-cicd-pike-dvr-sl
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.4.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 10.167.5.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 10.167.5.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 10.167.5.15
- maas_deploy_range_end: 10.167.5.199
- maas_deploy_range_start: 10.167.5.180
- maas_deploy_vlan: '0'
- maas_fabric_name: deploy-fabric0
- maas_hostname: cfg01
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: ${_param:cluster_name}.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openssh_groups: cicd
- openstack_benchmark_node01_address: 10.167.4.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
- openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
- openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
- openstack_control_address: 10.167.4.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 10.167.4.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 10.167.4.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 10.167.4.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 10.167.4.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 10.167.4.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 10.167.4.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network endpoint
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.4.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.4.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.4.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.4.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.4.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 10.167.4.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.4.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.4.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.4.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.4.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.4.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.4.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.4.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: prometheus
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.6.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.6.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- openstack_create_public_network: 'False'
- openstack_public_neutron_subnet_gateway: 172.17.16.1
- openstack_public_neutron_subnet_cidr: 172.17.16.0/24
- openstack_public_neutron_subnet_allocation_start: 172.17.16.201
- openstack_public_neutron_subnet_allocation_end: 172.17.16.245
- manila_enabled: 'False'
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
deleted file mode 100644
index f735daa..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
+++ /dev/null
@@ -1,269 +0,0 @@
-nodes:
- cfg01:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- - features_runtest
- #classes:
- #- environment.cookied-cicd-pike-dvr-sl.override_ntp_virtual
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_static_ctl
-
- kvm01:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kvm02:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kvm03:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cid01:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cid02:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cid03:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- mon01:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/overrides-policy.yml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
deleted file mode 100644
index a38f2f3..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
deleted file mode 100644
index 256081f..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ /dev/null
@@ -1,884 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dvr-sl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dvr-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+180, +220]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
- boot:
- - hd
- volumes:
- - name: system
- capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
- # it will be uploaded after config drive generation
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
deleted file mode 100644
index 9956b91..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ /dev/null
@@ -1,280 +0,0 @@
-default_context:
- auditd_enabled: 'False'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
- 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
- AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
- xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
- B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
- q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
- s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
- V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
- 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
- pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
- MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
- 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
- udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
- R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
- XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
- Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
- KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
- 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
- ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
- ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
- Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
- r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
- mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
- qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
- 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
- cluster_domain: cookied-cicd-pike-ovs-sl.local
- cluster_name: cookied-cicd-pike-ovs-sl
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.4.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 10.167.5.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 10.167.5.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 10.167.5.15
- maas_deploy_range_end: 10.167.5.199
- maas_deploy_range_start: 10.167.5.180
- maas_deploy_vlan: '0'
- maas_fabric_name: deploy-fabric0
- maas_hostname: cfg01
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: ${_param:cluster_name}.local
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openssh_groups: cicd
- openstack_benchmark_node01_address: 10.167.4.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
- openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
- openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
- openstack_control_address: 10.167.4.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 10.167.4.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 10.167.4.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 10.167.4.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 10.167.4.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 10.167.4.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 10.167.4.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network endpoint
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.4.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- octavia_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
- OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
- qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
- 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
- YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
- 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
- ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
- NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
- vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
- SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
- ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
- fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
- aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
- 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
- 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
- cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
- ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
- aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
- d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
- QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
- 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
- lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
- EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
- hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
- 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
- -----END RSA PRIVATE KEY-----
- octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- shared_reclass_branch: 'proposed'
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.4.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.4.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.4.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.4.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 10.167.4.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.4.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.4.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.4.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.4.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.4.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.4.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.4.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.6.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.6.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- openstack_octavia_enabled: 'True'
- octavia_health_manager_node01_address: 192.168.1.10
- octavia_health_manager_node02_address: 192.168.1.11
- octavia_health_manager_node03_address: 192.168.1.12
- octavia_manager_cluster: 'False'
- octavia_hm_bind_ip: 192.168.1.12
- octavia_lb_mgmt_cidr: 192.168.1.0/24
- octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
- octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
- openstack_create_public_network: 'False'
- openstack_public_neutron_subnet_gateway: 172.17.16.1
- openstack_public_neutron_subnet_cidr: 172.17.16.0/24
- openstack_public_neutron_subnet_allocation_start: 172.17.16.201
- openstack_public_neutron_subnet_allocation_end: 172.17.16.245
- manila_enabled: 'False'
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
deleted file mode 100644
index 4905e32..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
deleted file mode 100644
index 0fc0d86..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
+++ /dev/null
@@ -1,885 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-ovs-sl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-ovs-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
- boot:
- - hd
- volumes:
- - name: system
- capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
- backing_store: cfg01_day01_image
- format: qcow2
- - name: config
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
- # it will be uploaded after config drive generation
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
- memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_swp
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
index 863fd81..4d395b3 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
@@ -54,7 +54,8 @@
control_network_netmask: 255.255.255.0
control_network_subnet: 10.167.4.0/24
control_vlan: '10'
- cookiecutter_template_branch: master
+ cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: 10.167.5.1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
deleted file mode 100644
index 725ff1c..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ /dev/null
@@ -1,159 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-dpdk.local
- cluster_name: cookied-mcp-pike-dpdk
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: ${_param:openstack_control_node01_address}
-# infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: ${_param:openstack_control_node01_hostname}
- infra_kvm02_control_address: ${_param:openstack_control_node02_address}
-# infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: ${_param:openstack_control_node02_hostname}
- infra_kvm03_control_address: ${_param:openstack_control_node03_address}
-# infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: ${_param:openstack_control_node03_hostname}
- infra_kvm_vip_address: ${_param:openstack_control_address}
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 10.167.4.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'True'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_hugepages_count: '2048'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_cpu_pinning: '4,5,8,9,10,11'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_branch: master
- shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
deleted file mode 100644
index e6f71cf..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-nodes:
- cfg01.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs01.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs02.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dbs03.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg01.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg02.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- msg03.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- dpdkport0:
- role: bond2_dpdk_prv
- dpdk_pci: "0000:00:05.0"
- dpdkport1:
- role: bond2_dpdk_prv
- dpdk_pci: "0000:00:06.0"
- ens7:
- role: bond1_ab_ovs_floating
-
- gtw01.cookied-mcp-pike-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh_no_tag
- ens6:
- role: bond0_ab_ovs_vxlan_mesh_no_tag
- ens7:
- role: single_ovs_br_floating
- external_address: 10.90.0.110
- external_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
deleted file mode 100644
index 5716d76..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
deleted file mode 100644
index 20b2fa6..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
deleted file mode 100644
index 140eb8c..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Enable hugepages on cmp nodes
- cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-- description: "Workaround to avoid reboot cmp nodes: bring OVS interfaces UP"
- cmd: |
- salt 'cmp*' cmd.run "ifup br-prv";
- salt 'cmp*' cmd.run "ip l set up br-floating";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
deleted file mode 100644
index f8b58f5..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
deleted file mode 100644
index 1dba85e..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
+++ /dev/null
@@ -1,628 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-pike-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: True
- use_hugepages: True
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: e1000
- - label: ens6
- l2_network_device: tenant
- interface_model: e1000
- - label: ens7
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - tenant
- ens7:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
deleted file mode 100644
index baf1ba0..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'True'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-dvr-ssl-barbican.local
- cluster_name: cookied-mcp-pike-dvr-ssl-barbican
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- maas_enabled: 'False'
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 172.16.10.114
- openstack_dns_node02_hostname: dns02
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'False'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: bind
- designate_enabled: 'False'
- nova_vnc_tls_enabled: 'True'
- galera_ssl_enabled: 'True'
- openstack_mysql_x509_enabled: 'True'
- rabbitmq_ssl_enabled: 'True'
- openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
- tenant_telemetry_enabled: 'False'
- gnocchi_aggregation_storage: file
- manila_enabled: 'False'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
- openstack_barbican_address: 172.16.10.44
- openstack_barbican_hostname: kmn
- openstack_barbican_node01_address: 172.16.10.45
- openstack_barbican_node01_hostname: kmn01
- openstack_barbican_node02_address: 172.16.10.46
- openstack_barbican_node02_hostname: kmn02
- openstack_barbican_node03_address: 172.16.10.47
- openstack_barbican_node03_hostname: kmn03
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
deleted file mode 100644
index 876aab9..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
+++ /dev/null
@@ -1,134 +0,0 @@
-nodes:
- cfg01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kmn01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_barbican_node01
- roles:
- - openstack_barbican
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kmn02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_barbican_node02
- roles:
- - openstack_barbican
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kmn03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_barbican_node03
- roles:
- - openstack_barbican
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
deleted file mode 100644
index f5a0013..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
deleted file mode 100644
index 627ed30..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
deleted file mode 100644
index ebfa366..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: "Temp fix"
- cmd: |
- set -e;
- apt-get install virtualenv -y;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
deleted file mode 100644
index e1befcb..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
+++ /dev/null
@@ -1,513 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl-barbican') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+130, +220]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
deleted file mode 100644
index 234a7f3..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-dvr-ssl.local
- cluster_name: cookied-mcp-pike-dvr-ssl
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- maas_enabled: 'False'
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 172.16.10.114
- openstack_dns_node02_hostname: dns02
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- openstack_telemetry_address: 172.16.10.96
- openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 172.16.10.97
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 172.16.10.98
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 172.16.10.99
- openstack_telemetry_node03_hostname: mdb03
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: bind
- designate_enabled: 'False'
- nova_vnc_tls_enabled: 'True'
- galera_ssl_enabled: 'True'
- openstack_mysql_x509_enabled: 'True'
- rabbitmq_ssl_enabled: 'True'
- openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
- tenant_telemetry_enabled: 'True'
- gnocchi_aggregation_storage: file
- manila_enabled: 'False'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
deleted file mode 100644
index 158177e..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
-nodes:
- cfg01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- mdb01.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb02.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb03.mcp-pike-dvr-ssl.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
deleted file mode 100644
index f3d274a..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
deleted file mode 100644
index c1e32ec..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
deleted file mode 100644
index 9cf67fa..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: "Temp fix"
- cmd: |
- set -e;
- apt-get install virtualenv -y;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
deleted file mode 100644
index 07cfef8..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
deleted file mode 100644
index 26456f7..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
+++ /dev/null
@@ -1,809 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+130, +220]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
deleted file mode 100644
index f3535fc..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ /dev/null
@@ -1,239 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-dvr.local
- cluster_name: cookied-mcp-pike-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- designate_backend: bind
- designate_enabled: 'True'
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node02_address: 172.16.10.114
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- octavia_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
- OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
- qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
- 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
- YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
- 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
- ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
- NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
- vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
- SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
- ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
- fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
- aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
- 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
- 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
- cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
- ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
- aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
- d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
- QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
- 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
- lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
- EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
- hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
- 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
- -----END RSA PRIVATE KEY-----
- octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- fluentd_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: prometheus
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- manila_enabled: 'True'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
- openstack_octavia_enabled: 'True'
- octavia_health_manager_node01_address: 192.168.1.10
- octavia_health_manager_node02_address: 192.168.1.11
- octavia_health_manager_node03_address: 192.168.1.12
- octavia_manager_cluster: 'False'
- octavia_hm_bind_ip: 192.168.1.12
- octavia_lb_mgmt_cidr: 192.168.1.0/24
- octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
- octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-
-
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
deleted file mode 100644
index bcff533..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,232 +0,0 @@
-nodes:
- cfg01.mcp-pike-dvr.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-pike-dvr.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-pike-dvr.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-pike-dvr.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-pike-dvr.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03.mcp-pike-dvr.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-pike-dvr.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-pike-dvr.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- share01.mcp-pike-dvr.local:
- reclass_storage_name: openstack_share_node01
- roles:
- - openstack_share
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dns01.mcp-pike-dvr.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dns02.mcp-pike-dvr.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
deleted file mode 100644
index a39d636..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
deleted file mode 100644
index 59e85e3..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_API() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_MANAGER() }}
-
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
deleted file mode 100644
index c5a459f..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
deleted file mode 100644
index 995d21f..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
+++ /dev/null
@@ -1,256 +0,0 @@
-{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Launch containers
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
deleted file mode 100644
index 32ec67d..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ /dev/null
@@ -1,795 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-pike-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
deleted file mode 100644
index 80ca7f6..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ /dev/null
@@ -1,199 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-ovs.local
- cluster_name: cookied-mcp-pike-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- designate_backend: powerdns
- designate_enabled: 'True'
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node02_address: 172.16.10.114
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- manila_enabled: 'True'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
deleted file mode 100644
index 23e0af4..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
-nodes:
- cfg01.mcp-pike-ovs.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-pike-ovs.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-pike-ovs.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-pike-ovs.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-pike-ovs.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03.mcp-pike-ovs.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-pike-ovs.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-pike-ovs.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp-pike-ovs.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dns02.mcp-pike-ovs.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- share01.mcp-pike-ovs.local:
- reclass_storage_name: openstack_share_node01
- roles:
- - openstack_share
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
deleted file mode 100644
index 06946d4..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
-# Install support services
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
deleted file mode 100644
index de8e65e..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
deleted file mode 100644
index 9c13b64..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
deleted file mode 100644
index 83f70e5..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
+++ /dev/null
@@ -1,258 +0,0 @@
-{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#Launch containers
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
deleted file mode 100644
index d1c83dd..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
+++ /dev/null
@@ -1,794 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
deleted file mode 100644
index e402f2c..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ /dev/null
@@ -1,241 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- backend_network_netmask: 255.255.255.0
- backend_network_subnet: 10.167.4.0/24
- backend_vlan: '10'
- backup_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
- rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
- 9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
- qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
- Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
- 178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
- d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
- MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
- 6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
- sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
- H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
- EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
- zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
- fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
- HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
- x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
- +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
- UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
- 7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
- eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
- mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
- km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
- 9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
- OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
- CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
- bmk_enabled: 'False'
- ceph_cluster_network: 10.167.4.0/24
- ceph_enabled: 'True'
- ceph_hyper_converged: 'False'
- ceph_mon_node01_address: 10.167.4.66
- ceph_mon_node01_hostname: cmn01
- ceph_mon_node02_address: 10.167.4.67
- ceph_mon_node02_hostname: cmn02
- ceph_mon_node03_address: 10.167.4.68
- ceph_mon_node03_hostname: cmn03
- ceph_osd_backend: bluestore
- ceph_osd_block_db_size: '10'
- ceph_osd_bond_mode: active-backup
- ceph_osd_count: '2'
- ceph_osd_data_disks: /dev/vdb
- ceph_osd_journal_or_block_db_disks: /dev/vdc
- ceph_osd_node_count: '2'
- ceph_osd_journal_size: '10'
- ceph_osd_primary_first_nic: eth1
- ceph_osd_primary_second_nic: eth2
- ceph_osd_rack01_backend_subnet: 10.167.4
- ceph_osd_rack01_hostname: osd
- ceph_osd_rack01_single_subnet: 10.167.4
- ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
- ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95
- ceph_osd_backend_address_ranges: 10.167.4.94-10.167.4.95
- ceph_public_network: 10.167.4.0/24
- ceph_rgw_address: 10.167.4.75
- ceph_rgw_hostname: rgw
- ceph_rgw_node01_address: 10.167.4.76
- ceph_rgw_node01_hostname: rgw01
- ceph_rgw_node02_address: 10.167.4.77
- ceph_rgw_node02_hostname: rgw02
- ceph_rgw_node03_address: 10.167.4.78
- ceph_rgw_node03_hostname: rgw03
- ceph_version: luminous
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-queens-dvr-ceph.local
- cluster_name: cookied-mcp-queens-dvr-ceph
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
- control_network_netmask: 255.255.255.0
- control_network_subnet: 10.167.4.0/24
- control_vlan: '10'
- cookiecutter_template_branch: 'proposed'
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
- deploy_network_gateway: 10.167.5.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 10.167.5.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: obutenko@mirantis.com
- gainsight_service_enabled: 'False'
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 10.167.4.11
- infra_kvm01_deploy_address: 10.167.5.11
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.12
- infra_kvm02_deploy_address: 10.167.5.12
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.13
- infra_kvm03_deploy_address: 10.167.5.13
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.10
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- internal_proxy_enabled: 'False'
- kubernetes_ctl_on_kvm: 'False'
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 10.167.5.15
- maas_deploy_network_name: deploy_network
- maas_deploy_range_end: 10.167.5.230
- maas_deploy_range_start: 10.167.5.20
- maas_deploy_vlan: '0'
- maas_enabled: 'False'
- maas_fabric_name: deploy_fabric
- maas_hostname: cfg01
- mcp_common_scripts_branch: 'proposed'
- mcp_version: proposed
- no_platform: 'False'
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openssh_groups: ''
- openstack_benchmark_node01_address: 10.167.4.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
- openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
- openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 10.167.4.111
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 10.167.4.112
- openstack_dns_node02_hostname: dns02
- openstack_control_address: 10.167.4.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.10
- openstack_database_hostname: ctl
- openstack_database_node01_address: 10.167.4.11
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 10.167.4.12
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 10.167.4.13
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.10
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 10.167.4.11
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 10.167.4.12
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 10.167.4.13
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_bgp_vpn: 'False'
- openstack_neutron_bgp_vpn_driver: bagpipe
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network endpoint
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.82
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 10.167.4.19
- openstack_version: queens
- osd_padding_with_zeros: 'False'
- oss_enabled: 'False'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
- salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- shared_reclass_branch: 'proposed'
- shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
- sriov_network_subnet: 10.55.0.0/16
- stacklight_enabled: 'False'
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.167.6.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.167.6.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
- version: proposed
- vnf_onboarding_enabled: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: bind
- designate_enabled: 'False'
- nova_vnc_tls_enabled: 'False'
- galera_ssl_enabled: 'False'
- openstack_mysql_x509_enabled: 'False'
- rabbitmq_ssl_enabled: 'False'
- openstack_rabbitmq_x509_enabled: 'False'
- tenant_telemetry_enabled: 'True'
- gnocchi_aggregation_storage: ceph
- openstack_telemetry_address: 172.16.10.83
- openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 172.16.10.84
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 172.16.10.85
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 172.16.10.86
- openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
deleted file mode 100644
index fcce951..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
deleted file mode 100644
index e9cd9a1..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
-
-{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
deleted file mode 100644
index dd8fd17..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
deleted file mode 100644
index 850fedf..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
+++ /dev/null
@@ -1,787 +0,0 @@
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-{% import 'cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+70, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+70, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
- ip_ranges:
- dhcp: [+130, +230]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
deleted file mode 100644
index 8d62a09..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
+++ /dev/null
@@ -1,212 +0,0 @@
-nodes:
- cfg01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- osd<<count>>.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node01
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn02.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node02
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn03.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node03
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node01
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw02.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node02
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw03.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node03
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
-
- mdb01.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb02.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb03.cookied-mcp-queens-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
deleted file mode 100644
index e01644c..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
+++ /dev/null
@@ -1,226 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'True'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-queens-dvr-ssl-barbican.local
- cluster_name: cookied-mcp-queens-dvr-ssl-barbican
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- maas_enabled: 'False'
- mcp_version: proposed
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network proxy VIP
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 172.16.10.114
- openstack_dns_node02_hostname: dns02
- openstack_version: queens
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'False'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: bind
- designate_enabled: 'False'
- nova_vnc_tls_enabled: 'True'
- galera_ssl_enabled: 'True'
- openstack_mysql_x509_enabled: 'True'
- rabbitmq_ssl_enabled: 'True'
- openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
- tenant_telemetry_enabled: 'False'
- gnocchi_aggregation_storage: file
- manila_enabled: 'False'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
- openstack_barbican_address: 172.16.10.44
- openstack_barbican_hostname: kmn
- openstack_barbican_node01_address: 172.16.10.45
- openstack_barbican_node01_hostname: kmn01
- openstack_barbican_node02_address: 172.16.10.46
- openstack_barbican_node02_hostname: kmn02
- openstack_barbican_node03_address: 172.16.10.47
- openstack_barbican_node03_hostname: kmn03
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
deleted file mode 100644
index ea7c7ec..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
+++ /dev/null
@@ -1,138 +0,0 @@
-nodes:
- cfg01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kmn01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_barbican_node01
- roles:
- - openstack_barbican
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kmn02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_barbican_node02
- roles:
- - openstack_barbican
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- kmn03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_barbican_node03
- roles:
- - openstack_barbican
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
deleted file mode 100644
index 4efe25c..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
deleted file mode 100644
index 5a2bdac..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
deleted file mode 100644
index d786848..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: "Temp fix"
- cmd: |
- set -e;
- apt-get install virtualenv -y;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
deleted file mode 100644
index b81481b..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
+++ /dev/null
@@ -1,528 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ssl-barbican') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_KMN01 }}: +45
- default_{{ HOSTNAME_KMN02 }}: +46
- default_{{ HOSTNAME_KMN03 }}: +47
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+130, +220]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
deleted file mode 100644
index 4246f94..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
+++ /dev/null
@@ -1,226 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-queens-dvr-ssl.local
- cluster_name: cookied-mcp-queens-dvr-ssl
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- maas_enabled: 'False'
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network proxy VIP
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 172.16.10.114
- openstack_dns_node02_hostname: dns02
- openstack_version: queens
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- openstack_telemetry_address: 172.16.10.96
- openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 172.16.10.97
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 172.16.10.98
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 172.16.10.99
- openstack_telemetry_node03_hostname: mdb03
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: bind
- designate_enabled: 'False'
- nova_vnc_tls_enabled: 'True'
- galera_ssl_enabled: 'True'
- openstack_mysql_x509_enabled: 'True'
- rabbitmq_ssl_enabled: 'True'
- openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
- tenant_telemetry_enabled: 'True'
- gnocchi_aggregation_storage: file
- manila_enabled: 'False'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
deleted file mode 100644
index 5a90968..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
+++ /dev/null
@@ -1,237 +0,0 @@
-nodes:
- cfg01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- mon01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- mdb01.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb02.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb03.mcp-queens-dvr-ssl.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
deleted file mode 100644
index e10bccc..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
deleted file mode 100644
index 278b78b..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
deleted file mode 100644
index 5b0ce58..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: "Temp fix"
- cmd: |
- set -e;
- apt-get install virtualenv -y;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
deleted file mode 100644
index 7d65097..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'cookied-mcp-queens-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
deleted file mode 100644
index 3ec33cf..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
+++ /dev/null
@@ -1,826 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-queens-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-queens-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ssl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_MDB01 }}: +97
- default_{{ HOSTNAME_MDB02 }}: +98
- default_{{ HOSTNAME_MDB03 }}: +99
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+130, +220]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
deleted file mode 100644
index 0af6a85..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
+++ /dev/null
@@ -1,217 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-queens-dvr.local
- cluster_name: cookied-mcp-queens-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- maas_enabled: 'False'
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network proxy VIP
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 172.16.10.114
- openstack_dns_node02_hostname: dns02
- openstack_version: queens
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: bind
- designate_enabled: 'True'
- nova_vnc_tls_enabled: 'False'
- galera_ssl_enabled: 'False'
- openstack_mysql_x509_enabled: 'False'
- rabbitmq_ssl_enabled: 'False'
- openstack_rabbitmq_x509_enabled: 'False'
- tenant_telemetry_enabled: 'False'
- gnocchi_aggregation_storage: file
- manila_enabled: 'True'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
deleted file mode 100644
index ce8de58..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,237 +0,0 @@
-nodes:
- cfg01.mcp-queens-dvr.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-queens-dvr.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-queens-dvr.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- mon01.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03.mcp-queens-dvr.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-queens-dvr.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- share01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_share_node01
- roles:
- - openstack_share
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dns01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dns02.mcp-queens-dvr.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
deleted file mode 100644
index 293863a..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
deleted file mode 100644
index 70cc4f5..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
deleted file mode 100644
index 8c922d2..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
deleted file mode 100644
index 414187b..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
deleted file mode 100644
index 2cb86de..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
+++ /dev/null
@@ -1,812 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-queens-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-queens-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+130, +220]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
deleted file mode 100644
index f4854e9..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
+++ /dev/null
@@ -1,217 +0,0 @@
-default_context:
- barbican_backend: dogtag
- barbican_enabled: 'False'
- barbican_integration_enabled: 'False'
- auditd_enabled: 'True'
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-queens-ovs.local
- cluster_name: cookied-mcp-queens-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- maas_enabled: 'False'
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.17.16.80 # external network proxy VIP
- openstack_proxy_vip_interface: ens5
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_dns_hostname: dns
- openstack_dns_node01_address: 172.16.10.113
- openstack_dns_node01_hostname: dns01
- openstack_dns_node02_address: 172.16.10.114
- openstack_dns_node02_hostname: dns02
- openstack_version: queens
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'True'
- compute_padding_with_zeros: False
- designate_backend: powerdns
- designate_enabled: 'True'
- nova_vnc_tls_enabled: 'False'
- galera_ssl_enabled: 'False'
- openstack_mysql_x509_enabled: 'False'
- rabbitmq_ssl_enabled: 'False'
- openstack_rabbitmq_x509_enabled: 'False'
- tenant_telemetry_enabled: 'False'
- gnocchi_aggregation_storage: file
- manila_enabled: 'True'
- manila_share_backend: 'lvm'
- manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/vdc'
- openstack_share_address: 172.16.10.203
- openstack_share_node01_address: 172.16.10.204
- openstack_share_node01_deploy_address: 192.168.10.204
- openstack_share_hostname: share
- openstack_share_node01_hostname: share01
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
deleted file mode 100644
index c2aab29..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,237 +0,0 @@
-nodes:
- cfg01.mcp-queens-ovs.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp-queens-ovs.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp-queens-ovs.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp-queens-ovs.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp-queens-ovs.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- #- openstack_proxy # another VIP interface used
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
-
- mon01.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log01.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr01.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr02.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mtr03.mcp-queens-ovs.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp-queens-ovs.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-queens-ovs.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- dns02.mcp-queens-dvr.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - openstack_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- share01.mcp-queens-dvr.local:
- reclass_storage_name: openstack_share_node01
- roles:
- - openstack_share
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
deleted file mode 100644
index 739c58c..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
deleted file mode 100644
index 75fd27f..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
deleted file mode 100644
index 1e102d5..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
deleted file mode 100644
index 5ab3fd0..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
-
-{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-
-{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-
-{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-
-{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-
-{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-
-{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index fd1527a..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
deleted file mode 100644
index 349417e..0000000
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
+++ /dev/null
@@ -1,811 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-queens-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-queens-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +113
- default_{{ HOSTNAME_DNS02 }}: +114
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +204
- ip_ranges:
- dhcp: [+130, +220]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: manila
- capacity: 20
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-pike-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-pike-ovs-maas.yaml
index 49f91da..cb12b3f 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-pike-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-pike-ovs-maas.yaml
@@ -19,6 +19,10 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
index 6369346..697f99d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
@@ -19,6 +19,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
deleted file mode 100644
index a830c3f..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Other salt model repository parameters see in shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'cookied-bm-dpdk-pipeline' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-dpdk-pipeline') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml', 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
- salt '*' saltutil.refresh_pillar;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index e6c7313..3db5e4e 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -13,6 +13,9 @@
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
deleted file mode 100644
index c091213..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-ocata-contrail' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail-dpdk.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- . /root/venv-reclass-tools/bin/activate;
- # Remove rack01 key
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
- # Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: "Workaround for PROD-14060"
- cmd: |
- set -e;
- # Add tenant and single addresses for computes
- salt-call reclass.cluster_meta_set deploy_address 172.16.49.72 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
-
- salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
deleted file mode 100644
index aa74df1..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Other salt model repository parameters see in shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-ovs-dpdk' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-ovs-dpdk') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','alt-context-cookiecutter-openstack_ovs_dpdk.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
- salt '*' saltutil.refresh_pillar;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
index 52098cf..0943346 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-ocata-contrail-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-ocata-contrail-maas.yaml
deleted file mode 100644
index 9bc9ff9..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-ocata-contrail-maas.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-ocata-contrail-maas' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-ocata-contrail-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Defining username and password params for IPMI access
- cmd: |
- sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Add user/password for IPMI access"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround !! Fix or debug
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail32-maas-2018.8.0.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail32-maas-2018.8.0.yaml
deleted file mode 100644
index 127b860..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail32-maas-2018.8.0.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-os-contrail32-maas-2018.8.0' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-os-contrail32-maas-2018.8.0') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- # For 2018.11.0+ :
- # sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- # sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Add user/password for IPMI access"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- # For 2018.11.0+ :
- # reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- # reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround of bug PROD-22286 for VCP image path (MCP 2018.8.0)"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround !! Fix or debug
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Workaround for correct repositories for salt during maas bootstrap"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters.maas.cluster.saltstack_repo_key '${linux:system:repo:mcp_saltstack:key}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- reclass-tools add-key parameters.maas.cluster.saltstack_repo_trusty 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/trusty/ trusty main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- reclass-tools add-key parameters.maas.cluster.saltstack_repo_xenial 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/xenial/ xenial main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround of hardcoded apt_mk_version in docker repo"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key 'classes' 'system.linux.system.repo.mcp.apt_mirantis.docker' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml --merge;
- reclass-tools add-key parameters._param.apt_mk_version {{ SHARED.REPOSITORY_SUITE }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas-2018.8.0.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas-2018.8.0.yaml
deleted file mode 100644
index 9ede30d..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas-2018.8.0.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-os-contrail40-maas-2018.8.0' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-os-contrail40-maas-2018.8.0') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Temporary WR for correct bridge name according to envoronment templates
- cmd: |
- sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- # For 2018.11.0+ :
- # sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- # sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Add user/password for IPMI access"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- # For 2018.11.0+ :
- # reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- # reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround of bug PROD-22286 for VCP image path (MCP 2018.8.0)"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround !! Fix or debug
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Workaround for correct repositories for salt during maas bootstrap"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters.maas.cluster.saltstack_repo_key '${linux:system:repo:mcp_saltstack:key}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- reclass-tools add-key parameters.maas.cluster.saltstack_repo_trusty 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/trusty/ trusty main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- reclass-tools add-key parameters.maas.cluster.saltstack_repo_xenial 'deb [arch=amd64] ${_param:linux_system_repo_mcp_saltstack_url}/xenial/ xenial main' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround of hardcoded apt_mk_version in docker repo"
- cmd: |
- set -e;
- set -x;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key 'classes' 'system.linux.system.repo.mcp.apt_mirantis.docker' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml --merge;
- reclass-tools add-key parameters._param.apt_mk_version {{ SHARED.REPOSITORY_SUITE }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
index 17ad597..951d6fa 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
@@ -17,19 +17,14 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Temporary WR for correct bridge name according to envoronment templates
cmd: |
sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -58,11 +53,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-
-- description: Temporary workaround !! Fix or debug
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
index 5c65691..0ff7c82 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
index 08c2df1..5da87d1 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -12,6 +12,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
index 51e7b5f..4233f9c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
@@ -12,6 +12,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml
index e8a2c85..4b0c57d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml
@@ -12,6 +12,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
index f2729fa..4d554c4 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
@@ -19,6 +19,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
deleted file mode 100644
index fe171a9..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dpdk' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-ovs-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-pike-ovs-dpdk.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- # set wider cpu mask for DPDK
- salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Temporary workaround: remove cinder-volume from CTL nodes"
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
- parameters:
- neutron:
- compute:
- bridge_mappings:
- physnet2: br-prv
- EOF
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
index 25c1f9a..4b2b12f 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
@@ -12,6 +12,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
deleted file mode 100644
index b4e0202..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dvr-sl' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-dvr-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml', 'cookiecutter-context-pike-dvr-sl.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround of missing reclass.system for dns role
- # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Temporary workaround: remove cinder-volume from CTL nodes"
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
deleted file mode 100644
index 0fd346d..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-ovs-sl' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-ovs-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-pike-ovs-sl.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Temporary workaround: remove cinder-volume from CTL nodes"
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
index 2eb94c1..3264b5c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
@@ -12,6 +12,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
deleted file mode 100644
index 17301c4..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dpdk' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-pike-ovs-dpdk.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- # set wider cpu mask for DPDK
- salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
- parameters:
- neutron:
- compute:
- bridge_mappings:
- physnet2: br-prv
- EOF
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
deleted file mode 100644
index 3ac439e..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl-barbican' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
deleted file mode 100644
index a69b421..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-dvr-ssl.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
deleted file mode 100644
index 071d34e..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-dvr.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
deleted file mode 100644
index 9b34102..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-ovs' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-pike-ovs.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
deleted file mode 100644
index dcaff76..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ceph' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml', 'cookiecutter-context-dvr-ceph.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround !! Fix or debug
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-
-
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
deleted file mode 100644
index fa1c4ae..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ssl-barbican' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
deleted file mode 100644
index 53342a0..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ssl' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-dvr-ssl.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
deleted file mode 100644
index ab976ee..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-dvr.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
deleted file mode 100644
index b89f211..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-ovs' %}
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml', '_context-cookiecutter-mcp-queens-ovs.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-calico-sl.yaml
similarity index 69%
copy from tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
copy to tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-calico-sl.yaml
index 8e8537c..2b07452 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-calico-sl.yaml
@@ -1,19 +1,23 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico' %}
+{% set LAB_CONFIG_NAME = 'heat-cicd-k8s-calico-sl' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s.yaml', 'cookiecutter-context-k8s.yaml'] %}
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-sl.yaml', 'cookiecutter-context-k8s-sl.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
+
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-contrail41-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-contrail41-sl.yaml
index 84b9aac..b04ba9d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-contrail41-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-contrail41-sl.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-genie.yaml
similarity index 73%
rename from tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
rename to tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-genie.yaml
index 8e8537c..b6d2472 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-k8s-genie.yaml
@@ -1,17 +1,19 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico' %}
+{% set LAB_CONFIG_NAME = 'heat-cicd-k8s-genie' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s.yaml', 'cookiecutter-context-k8s.yaml'] %}
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-genie.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-genie.yaml', 'cookiecutter-context-k8s-genie.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
index 992dc35..1d0bae8 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
@@ -17,9 +17,22 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: "Add cpu_mode"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters.nova.compute.cpu_mode custom /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml;
+ reclass-tools add-key parameters.nova.compute.libvirt.cpu_model kvm64 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-dvr-sl.yaml
new file mode 100644
index 0000000..51cb8fd
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-dvr-sl.yaml
@@ -0,0 +1,34 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'heat-cicd-pike-dvr-sl' %}
+# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','heat-cicd-pike-dvr-sl') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml', 'salt-context-cookiecutter.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: "Add cpu_mode"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters.nova.compute.cpu_mode custom /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml;
+ reclass-tools add-key parameters.nova.compute.libvirt.cpu_model kvm64 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-queens-dvr-sl.yaml
new file mode 100644
index 0000000..eefc22d
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-queens-dvr-sl.yaml
@@ -0,0 +1,34 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'heat-cicd-queens-dvr-sl' %}
+# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','heat-cicd-queens-dvr-sl') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml', 'salt-context-cookiecutter.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: "Add cpu_mode"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters.nova.compute.cpu_mode custom /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml;
+ reclass-tools add-key parameters.nova.compute.libvirt.cpu_model kvm64 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
new file mode 100644
index 0000000..40b5867
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -0,0 +1,215 @@
+default_context:
+ auditd_enabled: 'False'
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ bmk_enabled: 'False'
+ calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
+ calico_enable_nat: 'True'
+ calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
+ calico_netmask: '16'
+ calico_network: 192.168.0.0
+ calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
+ ceph_enabled: 'False'
+ cicd_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: ==IPV4_NET_CONTROL_PREFIX==.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: heat-cicd-k8s-calico-sl.local
+ cluster_name: heat-cicd-k8s-calico-sl
+ context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: ==IPV4_NET_CONTROL_PREFIX==.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ etcd_ssl: 'True'
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: ==IPV4_NET_CONTROL_PREFIX==.241
+ infra_kvm01_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: ==IPV4_NET_CONTROL_PREFIX==.242
+ infra_kvm02_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: ==IPV4_NET_CONTROL_PREFIX==.243
+ infra_kvm03_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: ==IPV4_NET_CONTROL_PREFIX==.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_control_address: ==IPV4_NET_CONTROL_PREFIX==.10
+ kubernetes_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
+ kubernetes_control_node01_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.11
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
+ kubernetes_control_node02_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.12
+ kubernetes_control_node02_hostname: ctl02
+ kubernetes_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
+ kubernetes_control_node03_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.13
+ kubernetes_control_node03_hostname: ctl03
+ kubernetes_compute_count: 4
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_deploy_address_ranges: ==IPV4_NET_ADMIN_PREFIX==.101-==IPV4_NET_ADMIN_PREFIX==.104
+ kubernetes_compute_single_address_ranges: ==IPV4_NET_CONTROL_PREFIX==.101-==IPV4_NET_CONTROL_PREFIX==.104
+ kubernetes_compute_tenant_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.104
+ kubernetes_enabled: 'True'
+ kubernetes_externaldns_enabled: 'False'
+ kubernetes_keepalived_vip_interface: br_ctl
+ kubernetes_network_calico_enabled: 'True'
+ kubernetes_virtlet_enabled: 'True'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: ==IPV4_NET_CONTROL_PREFIX==.220
+ kubernetes_proxy_node01_address: ==IPV4_NET_CONTROL_PREFIX==.221
+ kubernetes_proxy_node02_address: ==IPV4_NET_CONTROL_PREFIX==.222
+ kubernetes_metallb_enabled: 'True'
+ metallb_addresses: 172.17.16.150-172.17.16.190
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
+ local_repositories: 'False'
+ maas_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.15
+ maas_deploy_range_end: ==IPV4_NET_ADMIN_PREFIX==.199
+ maas_deploy_range_start: ==IPV4_NET_ADMIN_PREFIX==.180
+ maas_deploy_vlan: '0'
+ maas_fabric_name: deploy-fabric0
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_enabled: 'False'
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
+ platform: kubernetes_enabled
+ public_host: ${_param:infra_config_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
+ salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
+ salt_master_address: ==IPV4_NET_CONTROL_PREFIX==.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: ==IPV4_NET_ADMIN_PREFIX==.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: ==IPV4_NET_CONTROL_PREFIX==.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: ==IPV4_NET_CONTROL_PREFIX==.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: ==IPV4_NET_CONTROL_PREFIX==.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: ==IPV4_NET_CONTROL_PREFIX==.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: ==IPV4_NET_CONTROL_PREFIX==.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: ==IPV4_NET_CONTROL_PREFIX==.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: ==IPV4_NET_CONTROL_PREFIX==.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: ==IPV4_NET_CONTROL_PREFIX==.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: ==IPV4_NET_CONTROL_PREFIX==.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: ==IPV4_NET_CONTROL_PREFIX==.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: ==IPV4_NET_CONTROL_PREFIX==.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: ==IPV4_NET_CONTROL_PREFIX==.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
+ vnf_onboarding_enabled: 'False'
+ secrets_encryption_enabled: 'True'
+ secrets_encryption_key_id: 'F5CB2ADC36159B03'
+ # Used on CI only.
+ secrets_encryption_private_key: ''
+ kubernetes_helm_enabled: 'True'
diff --git a/tcp_tests/templates/heat-cicd-k8s-calico-sl/encryption-key.asc b/tcp_tests/templates/heat-cicd-k8s-calico-sl/encryption-key.asc
new file mode 100644
index 0000000..381eb77
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/encryption-key.asc
@@ -0,0 +1,56 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQcYBFyBRcUBEACmP/muUIwbEg6Z7dA3c9I2NadcjDHXKg/ViXwaOB4KSd9/FC0o
+KSBPccWb+1sm+zdUy2f/LC5r8RvU7yZd4Mbzz8R1DQncXg4nG7bppW7oAcHpc0jk
+pV/SvdMYxuXsrbKbpoGEquwVkbb4oTv2MLSBfGfFzBeQfiwGEWm1xPLSeXc4biLC
+FatCU7w4LS1U4BEOqRCp6lW/hQFLoX+j6rNT8TwC5AeFpKgUWuQZGOO4fZKpbvo2
+sCvF5VA1HSVXlZtzum6pL1yzLL/SoyLrYOy1KrZQmSBHG9htCZQVmvYK7U5WtWE4
+Ws5IAj+HwvgKyzXE2Srsirj1NqauQRsk+1riQk3rpDrX2BeXNUSoHR5M/RDY0gCc
+8P6heanQRnyFtjUSoovkQsydY77+QVxe0MCs+lZlg31fL+wJVG7FIbIKKwR5sj8i
+/JqhWE+t2ZzIrQ/7o7fRk7hv/u69Vb/t/Nt7fkbn53zoubqi3kNgXf6hwhTUtfW/
+lE9cc4JTzis4i/RnILUDnAwos1c0Z+tGCUo4pbiP71VfU8L259g+clPFXOIkqA9t
+L9JSZQfhH/lRj3Abs57OvZjN7/D1h8PWB+8nTB8bkoUt45SubgQb0Y9maWUcwjxw
+AcJSIk6mq8vVdBu7zOuslDjMnoUZbtJwcSwQQOnb9UUppjs3CjbcH80ttQARAQAB
+AA/9ExdprtDlJf6u2pJqxNNyInOK4p/e4VydMOJ28/PZz0iod8lzXhdK9JSWItF8
+qD9VHVG2gaErO44Wqh9EgqdbcYg8gUycA0hxy5/tI2uyDsaU5CAvEMLE/Eh8Q24j
+3UgdKK64VOnj7p4rKuYpIp55PB1zNU24rwkuOQwq3Yreb7kvLbXIHA2s+xLunGzj
+tcl9a/eSSFD2w+WcPnkvVT2QlmUvhQ12p6w++QdvBkrLa9ZPz1FFPp6AiFtLGK5e
+KW6uyV1xc9BSjujmpmPBkNIynKNpCFxtTn0uH2doMAr5kkuqIV726SfUZISNkyOa
+pHKtnAtsWHmdv9skzQIBAgAzcXTBGbdDxRj6QR+ohqbsCzfu3z9QHSbXUmxezti9
+bQqpsU1SIg8z2oDARFR6KlRzhnfpPvan+Gp9TvYsvxrXe61HpxRMdLj6Gt2Ibruf
+YHCtr1S9J5CzTTOurlIKpACUYIqgVXfgIkQzqiYX8e56PiDTUB++OqEg66i0orXB
+nbHAD2vu16CNvcaNqsak3DWkHMwmEfsuxqyUXNte0eYu9SCHtnNoYT/D7A72gK4b
+Gqg80J8ZCpo1ilIX3xUq8WsH+CoXs0X7hy6Cbi22AqnHFRYmrgoIWmRzJonp393b
+yqmTV+QsKQRpmwdX4hiH78zJLnLEUQMn8CuHAGwaJCzk4okIAMKNrIQZhkdbCCe4
+IrLuMKn4aQj3c22SMXNmu78/0cP9Rtsm3ChjzzelLO7NjvPm0nIvEcThFSIZIXCv
+iWGZCXFCKn3WtA5xWuMFNXsEQcc3AG/qRODdDSeFpo+VH/9IwppAc3zI2jxe1PRD
+G2DnheLaLIKgHunsCYxpftJDod/vRqRHeU7ulMVJfEKVxdzrCbKGiIOXSyS6KowQ
+JOxF/80ocq/25Zc/oH25Y2r/0y+xzDpOHBgU0ndrCZf2z8oOuECJTxcq83UDyJzT
+HrG/hTrU83YsQMZ0AwBrYxpzUfdH7b6y60VE19FrwmMDK6Fz8I/x4Ai0sNkI3QLR
+NntY9fsIANrB3QM8CtsdxXsFvdTEwNLsG8LMdn3loCH6Cq3ejkEKa69Uua+sB6ND
+wYOXWzyksLZJyfxIXux/hMlK/kO3ohGcEFiMUaDZndJy8IKUlDrhwcUZqm7dXMDU
+CIf0T3rOEzOXbNu3UTds3j/ruSvA5KmjzOa4Qnb41CyL5Fh7x0R8Rux3NzAn6Ecx
+Y+nAWRtI/Yz7zdL8zuHaJfbVuxAPJ+ImcXAS7cX6T9dM3tWRlam1+0Ezhdb4F8i5
+lcY7sMu95scDwhV7qOmln6wtGSkBPZgE0+TqRuELZrPvlcIRRIM42UwPWhYO2PG8
+kKd2i5teweDnhzN8+E87VV2BQhP9DA8H/0+ZiXsvaG60JGqNmWzVbB6U1qgwrFOR
+VcuzIWpdZyQR8Ok63GXuA0odoqReolba9R6fVlXchj6INBz2WY2F0twwCRPx7tRg
+Pyq4PaTA8ZYYjAVWVCd9k97gY2i80p4MPzQCnE8g4n6OWGY47pcTwSkm4HBoGoam
+igIRn3Soz7CXGF+PvSGi1T0jpwM5IWfM3IwEUPdPTIJuA2iD/9zSKDvhsP+trJ1Y
+TMe9CW3Llf5mFbHLRZ7LfMOLIngKOIxBAxHiT8wUrIRaH78wHdz8ALDsC+LNP6rK
+hKb8h/VHXaqmf0BlNjGpO7XZXfxXWJ0oTUG5Z+jKz2Ir14HYLZI1GlOA8bQlZXhh
+bXBsZS5jb20gPHNhbHQtbWFzdGVyQGV4YW1wbGUuY29tPokCTgQTAQgAOBYhBLaR
+Vrvqyq56MiGjUvXLKtw2FZsDBQJcgUXFAhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4B
+AheAAAoJEPXLKtw2FZsDpi4P/1kmvlpkbOhrL73zAPyMzYa4Yo2Pi/BoMbyEKNKO
+K3wLCdP6xLGecVIt8pANosksDSGlWAnWj36/jfgt/aZisx1u6MTYaOEHkXahxOX4
+ghDW1cTbdtz7Uy5Ah9O3WNI+ejmOpCtuc3P/XOkdttKZLuCNCs6ocgCsejpNHcFK
+vMhOhnRKV8kcBrG2QLyfSyafBtM/zV+NR4Wrng71Za8fiXHlDanmrAIyuSnD538r
+hTwSFe0C9HntwuF6W+UShN7c+jPJaKQjKbZy9fuFp33NcTSPCB5dH9yrhQvOeFQo
+dFzEabMDFVGPfUVWR+TH39dWYOsq5zFmgQAbOB/vHdmEtrYNrxX0AiCZZHQHTUb9
+oBK68V8eVeFdoRLcMORBZ2RCqkQTOQoAF7o772knltjtsymnI0XNvVC/XCnZv89Q
+/eoivrd/rMMpTFOGcys6EAnSUWx0ZG/JCkezQqnx9U219BvqKNOZ60aOeOYHKpsX
+Ha8Nr72YRmtm0UMsDjEUyLOj+o06XnN7uafMv2bZpjWh2hfOrkAbxe41z6t+78ho
+P+C5vSvp01OmAt71iq+62MXVcLVKEWDpiuZSj8m83RlY5AGIaPaGX9LKPcHdGxKw
+QSczgB/jI3G08vWaq82he6UJuYexbYe1iJXfvcx8kThwZ1nXQJm+7UsISUsh8/NZ
+x0n/
+=uxDD
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml b/tcp_tests/templates/heat-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
similarity index 76%
rename from tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
rename to tcp_tests/templates/heat-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index be99dbb..2d4689c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -4,7 +4,6 @@
roles:
- infra_config
- linux_system_codename_xenial
- - features_runtest
interfaces:
ens3:
role: single_dhcp
@@ -78,40 +77,42 @@
role: single_ctl
ctl01:
- reclass_storage_name: openstack_control_node01
+ reclass_storage_name: kubernetes_control_node01
roles:
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_lvm_backend_control
+ - kubernetes_control
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
ctl02:
- reclass_storage_name: openstack_control_node02
+ reclass_storage_name: kubernetes_control_node02
roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
+ - kubernetes_control
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
- role: single_ctl
+ role: single_ctl_calico
ctl03:
- reclass_storage_name: openstack_control_node03
+ reclass_storage_name: kubernetes_control_node03
roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_lvm_backend_control
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl_calico
+
+ prx01:
+ reclass_storage_name: kubernetes_proxy_node01
+ roles:
+ - kubernetes_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -119,20 +120,29 @@
ens4:
role: single_ctl
- prx01:
- reclass_storage_name: openstack_proxy_node01
+ prx02:
+ reclass_storage_name: kubernetes_proxy_node02
roles:
- #- openstack_proxy # another VIP interface used
+ - kubernetes_proxy
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
- ens5:
- role: single_external
- external_address: 172.17.16.121
- external_network_netmask: 255.255.255.0
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>:
+ reclass_storage_name: kubernetes_compute_rack01
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl_calico
mon01:
reclass_storage_name: stacklight_server_node01
@@ -167,39 +177,6 @@
ens4:
role: single_ctl
- log01:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log02:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- log03:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
mtr01:
reclass_storage_name: stacklight_telemetry_node01
roles:
@@ -233,35 +210,35 @@
ens4:
role: single_ctl
- # Generator-based computes. For compatibility only
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
+ log01:
+ reclass_storage_name: stacklight_log_node01
roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
+ - stacklight_log_leader_v2
- linux_system_codename_xenial
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
- gtw01:
- reclass_storage_name: openstack_gateway_node01
+ log02:
+ reclass_storage_name: stacklight_log_node02
roles:
+ - stacklight_log
- linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
interfaces:
ens3:
role: single_dhcp
ens4:
role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/heat-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/heat-cicd-k8s-calico-sl/salt.yaml
new file mode 100644
index 0000000..745b97c
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/salt.yaml
@@ -0,0 +1,23 @@
+{% set HOSTNAME_CFG01='cfg01.heat-cicd-k8s-calico-sl.local' %}
+{% set LAB_CONFIG_NAME='heat-cicd-k8s-calico-sl' %}
+{% set DOMAIN_NAME='heat-cicd-k8s-calico-sl.local' %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Share custom key from cfg to give each node acces with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
diff --git a/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..1677dcd
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay--user-data-foundation.yaml
@@ -0,0 +1,64 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ jenkins:qalab
+ expire: False
+
+packages:
+ - openjdk-8-jre-headless
+ - libyaml-dev
+ - libffi-dev
+ - libvirt-dev
+ - python-dev
+ - python-pip
+ - python-virtualenv
+ #- python-psycopg2
+ - pkg-config
+ - vlan
+ - bridge-utils
+ - ebtables
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay-userdata.yaml
similarity index 61%
rename from tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
rename to tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay-userdata.yaml
index 6451e34..8c1f248 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay-userdata.yaml
@@ -1,39 +1,33 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
- ssh_pwauth: True
- users:
+ssh_pwauth: True
+users:
- name: root
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
- disable_root: false
- chpasswd:
+disable_root: false
+chpasswd:
list: |
root:r00tme
expire: False
- bootcmd:
+bootcmd:
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
- output:
+output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
- runcmd:
+runcmd:
- if lvs vg0; then pvresize /dev/vda3; fi
- if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
- export TERM=linux
- export LANG=C
# Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
-
+ #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
# Enable grub menu using updated config below
- update-grub
@@ -41,14 +35,7 @@
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
- # Create swap
- #- fallocate -l 16G /swapfile
- #- chmod 600 /swapfile
- #- mkswap /swapfile
- #- swapon /swapfile
- #- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
+write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
GRUB_RECORDFAIL_TIMEOUT=30
@@ -63,20 +50,20 @@
- path: /usr/share/growlvm/image-layout.yml
content: |
root:
- size: '30%VG'
+ size: '65%VG'
home:
- size: '1G'
+ size: '1%VG'
var_log:
- size: '11%VG'
+ size: '10%VG'
var_log_audit:
- size: '5G'
+ size: '5%VG'
var_tmp:
- size: '11%VG'
+ size: '10%VG'
tmp:
- size: '5G'
+ size: '5%VG'
owner: root:root
- growpart:
+growpart:
mode: auto
devices:
- '/'
diff --git a/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay.hot b/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay.hot
new file mode 100644
index 0000000..c8ec4fe
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/underlay.hot
@@ -0,0 +1,597 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for heat-cicd-k8s-calico-sl
+
+parameters:
+ instance_domain:
+ type: string
+ default: heat-cicd-k8s-calico-sl.local
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+ control_subnet_cidr:
+ type: string
+ management_subnet_cidr:
+ type: string
+ management_subnet_pool_start:
+ type: string
+ management_subnet_pool_end:
+ type: string
+ management_subnet_cfg01_ip:
+ type: string
+ management_subnet_gateway_ip:
+ type: string
+
+ key_pair:
+ type: string
+
+ ctl_flavor:
+ type: string
+ cfg_flavor:
+ type: string
+ cid_flavor:
+ type: string
+ kvm_fake_flavor:
+ type: string
+ mon_flavor:
+ type: string
+ log_flavor:
+ type: string
+ mtr_flavor:
+ type: string
+ cmp_flavor:
+ type: string
+ foundation_flavor:
+ type: string
+
+ net_public:
+ type: string
+
+ foundation_image:
+ type: string
+
+resources:
+ networks:
+ type: MCP::Networks
+ properties:
+ stack_name: { get_param: "OS::stack_name" }
+ env_name: { get_param: env_name }
+
+ #flavors:
+ # type: MCP::Flavors
+
+ cfg01_node:
+ type: MCP::MasterNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ cfg01_flavor: { get_param: cfg_flavor }
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '15' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '15' ]
+ instance_name: cfg01
+ instance_domain: {get_param: instance_domain}
+ network: { get_attr: [networks, network] }
+
+ control_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: ctl01
+ instance02_name: ctl02
+ instance03_name: ctl03
+ instance01_role: k8s_controller
+ instance_flavor: {get_param: ctl_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '11' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '12' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '13' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '11' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '12' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '13' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '11' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '12' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '13' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ fake_kvm_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kvm01
+ instance02_name: kvm02
+ instance03_name: kvm03
+ instance_flavor: {get_param: kvm_fake_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '241' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '242' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '243' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '241' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '242' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '243' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '241' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '242' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '243' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cicd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [fake_kvm_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cid01
+ instance02_name: cid02
+ instance03_name: cid03
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '91' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '92' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '93' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '91' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '92' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '93' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '91' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '92' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '93' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_monitor_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cicd_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mon01
+ instance02_name: mon02
+ instance03_name: mon03
+ instance_flavor: {get_param: mon_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '71' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '72' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '73' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '71' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '72' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '73' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '71' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '72' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '73' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_log_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_monitor_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: log01
+ instance02_name: log02
+ instance03_name: log03
+ instance_flavor: {get_param: log_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '61' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '62' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '63' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '61' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '62' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '63' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '61' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '62' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '63' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_mtr_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_log_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mtr01
+ instance02_name: mtr02
+ instance03_name: mtr03
+ instance_flavor: {get_param: mtr_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '86' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '87' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '88' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '86' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '87' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '88' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '86' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '87' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '88' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [stacklight_mtr_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx01
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '221' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '221' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '221' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx02_virtual:
+ type: MCP::SingleInstance
+ depends_on: [prx01_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx02
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '222' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '222' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '222' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp001_virtual:
+ type: MCP::Compute
+ depends_on: [prx02_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp001
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '101' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '101' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp002_virtual:
+ type: MCP::Compute
+ depends_on: [cmp001_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp002
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '102' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '102' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp003_virtual:
+ type: MCP::Compute
+ depends_on: [cmp002_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp003
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '103' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '103' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '103' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp004_virtual:
+ type: MCP::Compute
+ depends_on: [cmp003_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp004
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '104' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '104' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '104' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ foundation_node:
+ type: MCP::FoundationNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: foundation
+ instance_image: { get_param: foundation_image }
+ instance_flavor: {get_param: foundation_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '5' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+outputs:
+
+ control_subnet_cidr:
+ description: Control network CIDR
+ value: { get_param: control_subnet_cidr }
+
+ management_subnet_cidr:
+ description: Admin network CIDR
+ value: { get_param: management_subnet_cidr }
+
+ foundation_floating:
+ description: foundation node IP address (floating) from external network
+ value:
+ get_attr:
+ - foundation_node
+ - instance_floating_address
+...
diff --git a/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/salt-context-cookiecutter-k8s-contrail.yaml
index db98f7a..599f7d7 100644
--- a/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/salt-context-cookiecutter-k8s-contrail.yaml
@@ -161,6 +161,7 @@
control_network_subnet: ==IPV4_NET_CONTROL_PREFIX==.0/24
control_vlan: '10'
cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: ssh://gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
diff --git a/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/underlay.hot
index 4f550a7..0003d16 100644
--- a/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-k8s-contrail41-sl/underlay.hot
@@ -56,6 +56,9 @@
net_public:
type: string
+ foundation_image:
+ type: string
+
resources:
networks:
type: MCP::Networks
@@ -73,6 +76,14 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '15' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '15' ]
instance_name: cfg01
instance_domain: {get_param: instance_domain}
network: { get_attr: [networks, network] }
@@ -87,6 +98,7 @@
instance01_name: ctl01
instance02_name: ctl02
instance03_name: ctl03
+ instance01_role: k8s_controller
instance_flavor: {get_param: ctl_flavor}
network: { get_attr: [networks, network] }
underlay_userdata: { get_file: ./underlay-userdata.yaml }
@@ -102,6 +114,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '13' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '11' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '12' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '13' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '11' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '12' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '13' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -130,6 +166,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '243' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '241' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '242' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '243' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '241' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '242' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '243' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -158,6 +218,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '93' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '91' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '92' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '93' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '91' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '92' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '93' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -186,6 +270,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '73' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '71' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '72' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '73' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '71' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '72' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '73' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
stacklight_log_cluster:
@@ -213,6 +322,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '63' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '61' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '62' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '63' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '61' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '62' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '63' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
stacklight_mtr_cluster:
@@ -240,6 +374,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '88' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '86' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '87' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '88' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '86' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '87' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '88' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
ceph_cmn_cluster:
@@ -267,6 +426,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '68' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '66' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '67' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '68' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '66' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '67' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '68' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
ceph_rgw_cluster:
@@ -294,6 +478,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '78' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '76' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '77' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '78' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '76' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '77' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '78' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
ceph_osd_cluster:
@@ -321,6 +530,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '203' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '201' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '202' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '203' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '201' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '202' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '203' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
prx01_virtual:
@@ -338,6 +572,15 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '221' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '221' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '221' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
prx02_virtual:
@@ -355,6 +598,14 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '222' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '222' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '222' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
cmp001_virtual:
@@ -372,6 +623,15 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '101' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '101' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
cmp002_virtual:
@@ -389,6 +649,15 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '102' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '102' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
foundation_node:
@@ -399,6 +668,7 @@
mcp_version: { get_param: mcp_version }
instance_domain: {get_param: instance_domain}
instance_name: foundation
+ instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
network: { get_attr: [networks, network] }
underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
@@ -406,6 +676,15 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '5' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
outputs:
diff --git a/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
new file mode 100644
index 0000000..48d8f1d
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -0,0 +1,128 @@
+default_context:
+ auditd_enabled: 'False'
+ bmk_enabled: 'False'
+ calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
+ calico_enable_nat: 'True'
+ calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
+ calico_netmask: '16'
+ calico_network: 192.168.0.0
+ calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
+ ceph_enabled: 'False'
+ cicd_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: ==IPV4_NET_CONTROL_PREFIX==.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cluster_domain: heat-cicd-k8s-genie.local
+ cluster_name: heat-cicd-k8s-genie
+ context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: ==IPV4_NET_CONTROL_PREFIX==.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ etcd_ssl: 'True'
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: ==IPV4_NET_CONTROL_PREFIX==.241
+ infra_kvm01_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: ==IPV4_NET_CONTROL_PREFIX==.242
+ infra_kvm02_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: ==IPV4_NET_CONTROL_PREFIX==.243
+ infra_kvm03_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: ==IPV4_NET_CONTROL_PREFIX==.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_control_address: ==IPV4_NET_CONTROL_PREFIX==.10
+ kubernetes_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
+ kubernetes_control_node01_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.11
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
+ kubernetes_control_node02_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.12
+ kubernetes_control_node02_hostname: ctl02
+ kubernetes_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
+ kubernetes_control_node03_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.13
+ kubernetes_control_node03_hostname: ctl03
+ kubernetes_compute_count: 4
+ kubernetes_compute_rack01_hostname: cmp
+ kubernetes_compute_deploy_address_ranges: ==IPV4_NET_ADMIN_PREFIX==.101-==IPV4_NET_ADMIN_PREFIX==.104
+ kubernetes_compute_single_address_ranges: ==IPV4_NET_CONTROL_PREFIX==.101-==IPV4_NET_CONTROL_PREFIX==.104
+ kubernetes_compute_tenant_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.104
+ kubernetes_enabled: 'True'
+ kubernetes_externaldns_enabled: 'False'
+ kubernetes_keepalived_vip_interface: br_ctl
+ kubernetes_network_calico_enabled: 'True'
+ kubernetes_proxy_hostname: prx
+ kubernetes_proxy_node01_hostname: prx01
+ kubernetes_proxy_node02_hostname: prx02
+ kubernetes_proxy_address: ==IPV4_NET_CONTROL_PREFIX==.220
+ kubernetes_proxy_node01_address: ==IPV4_NET_CONTROL_PREFIX==.221
+ kubernetes_proxy_node02_address: ==IPV4_NET_CONTROL_PREFIX==.222
+ kubernetes_metallb_enabled: 'True'
+ metallb_addresses: 172.17.16.150-172.17.16.190
+ kubernetes_ingressnginx_enabled: 'True'
+ kubernetes_ingressnginx_controller_replicas: 2
+ local_repositories: 'False'
+ maas_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.15
+ maas_deploy_range_end: ==IPV4_NET_ADMIN_PREFIX==.199
+ maas_deploy_range_start: ==IPV4_NET_ADMIN_PREFIX==.180
+ maas_deploy_vlan: '0'
+ maas_fabric_name: deploy-fabric0
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_enabled: 'False'
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: kubernetes_enabled
+ public_host: ${_param:infra_config_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
+ salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
+ salt_master_address: ==IPV4_NET_CONTROL_PREFIX==.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: ==IPV4_NET_ADMIN_PREFIX==.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
+ vnf_onboarding_enabled: 'False'
+ kubernetes_network_flannel_enabled: 'True'
+ flannel_network: 10.20.0.0/16
+ kubernetes_network_genie_enabled: 'True'
+ kubernetes_genie_default_plugin: 'calico'
+ kubernetes_virtlet_enabled: 'True'
+ kubernetes_helm_enabled: 'True'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/heat-cicd-k8s-genie/environment-context-k8s-genie.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
rename to tcp_tests/templates/heat-cicd-k8s-genie/environment-context-k8s-genie.yaml
diff --git a/tcp_tests/templates/heat-cicd-k8s-genie/salt.yaml b/tcp_tests/templates/heat-cicd-k8s-genie/salt.yaml
new file mode 100644
index 0000000..fbac1ee
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-genie/salt.yaml
@@ -0,0 +1,23 @@
+{% set HOSTNAME_CFG01='cfg01.heat-cicd-k8s-genie.local' %}
+{% set LAB_CONFIG_NAME='heat-cicd-k8s-genie' %}
+{% set DOMAIN_NAME='heat-cicd-k8s-genie.local' %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Share custom key from cfg to give each node access with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
diff --git a/tcp_tests/templates/heat-cicd-k8s-genie/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-k8s-genie/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..1677dcd
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-genie/underlay--user-data-foundation.yaml
@@ -0,0 +1,64 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ jenkins:qalab
+ expire: False
+
+packages:
+ - openjdk-8-jre-headless
+ - libyaml-dev
+ - libffi-dev
+ - libvirt-dev
+ - python-dev
+ - python-pip
+ - python-virtualenv
+ #- python-psycopg2
+ - pkg-config
+ - vlan
+ - bridge-utils
+ - ebtables
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/heat-cicd-k8s-genie/underlay-userdata.yaml
similarity index 61%
copy from tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
copy to tcp_tests/templates/heat-cicd-k8s-genie/underlay-userdata.yaml
index 6451e34..8c1f248 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/heat-cicd-k8s-genie/underlay-userdata.yaml
@@ -1,39 +1,33 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
- ssh_pwauth: True
- users:
+ssh_pwauth: True
+users:
- name: root
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
- disable_root: false
- chpasswd:
+disable_root: false
+chpasswd:
list: |
root:r00tme
expire: False
- bootcmd:
+bootcmd:
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
- output:
+output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
- runcmd:
+runcmd:
- if lvs vg0; then pvresize /dev/vda3; fi
- if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
- export TERM=linux
- export LANG=C
# Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
-
+ #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
# Enable grub menu using updated config below
- update-grub
@@ -41,14 +35,7 @@
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
- # Create swap
- #- fallocate -l 16G /swapfile
- #- chmod 600 /swapfile
- #- mkswap /swapfile
- #- swapon /swapfile
- #- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
+write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
GRUB_RECORDFAIL_TIMEOUT=30
@@ -63,20 +50,20 @@
- path: /usr/share/growlvm/image-layout.yml
content: |
root:
- size: '30%VG'
+ size: '65%VG'
home:
- size: '1G'
+ size: '1%VG'
var_log:
- size: '11%VG'
+ size: '10%VG'
var_log_audit:
- size: '5G'
+ size: '5%VG'
var_tmp:
- size: '11%VG'
+ size: '10%VG'
tmp:
- size: '5G'
+ size: '5%VG'
owner: root:root
- growpart:
+growpart:
mode: auto
devices:
- '/'
diff --git a/tcp_tests/templates/heat-cicd-k8s-genie/underlay.hot b/tcp_tests/templates/heat-cicd-k8s-genie/underlay.hot
new file mode 100644
index 0000000..158f366
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-k8s-genie/underlay.hot
@@ -0,0 +1,441 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for heat-cicd-k8s-genie
+
+parameters:
+ instance_domain:
+ type: string
+ default: heat-cicd-k8s-genie.local
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+ control_subnet_cidr:
+ type: string
+ management_subnet_cidr:
+ type: string
+ management_subnet_pool_start:
+ type: string
+ management_subnet_pool_end:
+ type: string
+ management_subnet_cfg01_ip:
+ type: string
+ management_subnet_gateway_ip:
+ type: string
+
+ key_pair:
+ type: string
+
+ ctl_flavor:
+ type: string
+ cfg_flavor:
+ type: string
+ cid_flavor:
+ type: string
+ kvm_fake_flavor:
+ type: string
+ mon_flavor:
+ type: string
+ log_flavor:
+ type: string
+ mtr_flavor:
+ type: string
+ cmp_flavor:
+ type: string
+ foundation_flavor:
+ type: string
+
+ net_public:
+ type: string
+
+ foundation_image:
+ type: string
+
+resources:
+ networks:
+ type: MCP::Networks
+ properties:
+ stack_name: { get_param: "OS::stack_name" }
+ env_name: { get_param: env_name }
+
+ #flavors:
+ # type: MCP::Flavors
+
+ cfg01_node:
+ type: MCP::MasterNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ cfg01_flavor: { get_param: cfg_flavor }
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '15' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '15' ]
+ instance_name: cfg01
+ instance_domain: {get_param: instance_domain}
+ network: { get_attr: [networks, network] }
+
+ control_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: ctl01
+ instance02_name: ctl02
+ instance03_name: ctl03
+ instance01_role: [k8s_controller]
+ instance_flavor: {get_param: ctl_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '11' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '12' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '13' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '11' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '12' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '13' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '11' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '12' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '13' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ fake_kvm_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kvm01
+ instance02_name: kvm02
+ instance03_name: kvm03
+ instance_flavor: {get_param: kvm_fake_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '241' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '242' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '243' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '241' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '242' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '243' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '241' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '242' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '243' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cicd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [fake_kvm_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cid01
+ instance02_name: cid02
+ instance03_name: cid03
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '91' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '92' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '93' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '91' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '92' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '93' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '91' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '92' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '93' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [cicd_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx01
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '221' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '221' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '221' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx02_virtual:
+ type: MCP::SingleInstance
+ depends_on: [prx01_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx02
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '222' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '222' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '222' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp001_virtual:
+ type: MCP::Compute
+ depends_on: [prx02_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp001
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '101' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '101' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp002_virtual:
+ type: MCP::Compute
+ depends_on: [cmp001_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp002
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '102' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '102' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp003_virtual:
+ type: MCP::Compute
+ depends_on: [cmp002_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp003
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '103' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '103' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '103' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp004_virtual:
+ type: MCP::Compute
+ depends_on: [cmp003_virtual]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp004
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '104' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '104' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '104' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ foundation_node:
+ type: MCP::FoundationNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: foundation
+ instance_image: { get_param: foundation_image }
+ instance_flavor: {get_param: foundation_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '5' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+outputs:
+
+ control_subnet_cidr:
+ description: Control network CIDR
+ value: { get_param: control_subnet_cidr }
+
+ management_subnet_cidr:
+ description: Admin network CIDR
+ value: { get_param: management_subnet_cidr }
+
+ foundation_floating:
+ description: foundation node IP address (floating) from external network
+ value:
+ get_attr:
+ - foundation_node
+ - instance_floating_address
+...
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 92a456d..fc56b80 100644
--- a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -81,6 +81,7 @@
control_vlan: '10'
tenant_vlan: '20'
cookiecutter_template_branch: ''
+ jenkins_pipelines_branch: 'release/2019.2.0'
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
@@ -231,7 +232,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
+ tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
upstream_proxy_enabled: 'False'
@@ -249,7 +250,6 @@
ceph_osd_journal_size: 3
ceph_osd_bond_mode: "active-backup"
ceph_osd_data_partition_prefix: ""
-
ceph_public_network_allocation: storage
ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
index f3f0e12..4b82924 100644
--- a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
@@ -60,10 +60,14 @@
type: string
foundation_flavor:
type: string
-
+ vsrx_flavor:
+ type: string
net_public:
type: string
+ foundation_image:
+ type: string
+
resources:
networks:
type: MCP::Networks
@@ -81,6 +85,14 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '15' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '15' ]
instance_name: cfg01
instance_domain: {get_param: instance_domain}
network: { get_attr: [networks, network] }
@@ -110,6 +122,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '13' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '11' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '12' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '13' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '11' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '12' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '13' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -138,6 +174,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '53' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '51' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '52' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '53' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '51' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '52' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '53' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
fake_kvm_cluster:
@@ -165,6 +226,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '243' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '241' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '242' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '243' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '241' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '242' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '243' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -193,6 +278,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '43' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '41' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '42' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '43' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '41' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '42' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '43' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
cicd_cluster:
@@ -220,6 +330,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '93' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '91' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '92' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '93' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '91' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '92' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '93' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -248,6 +382,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '23' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '21' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '22' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '23' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '21' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '22' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '23' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
contrail_nal_cluster:
@@ -275,6 +434,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '33' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '31' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '32' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '33' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '31' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '32' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '33' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
stacklight_monitor_cluster:
@@ -302,6 +486,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '73' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '71' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '72' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '73' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '71' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '72' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '73' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
stacklight_log_cluster:
@@ -329,6 +538,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '63' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '61' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '62' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '63' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '61' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '62' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '63' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
stacklight_mtr_cluster:
@@ -356,6 +590,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '88' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '86' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '87' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '88' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '86' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '87' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '88' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
ceph_cmn_cluster:
@@ -383,6 +642,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '68' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '66' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '67' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '68' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '66' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '67' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '68' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
ceph_rgw_cluster:
@@ -410,6 +693,30 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '78' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '76' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '77' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '78' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '76' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '77' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '78' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
ceph_osd_cluster:
@@ -437,6 +744,31 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '203' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '201' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '202' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '203' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '201' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '202' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '203' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
prx01_virtual:
@@ -454,6 +786,14 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '81' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '81' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '81' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
@@ -472,6 +812,15 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '101' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '101' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
cmp002_virtual:
@@ -489,6 +838,15 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '102' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '102' ]
+
instance_config_host: { get_attr: [cfg01_node, instance_address] }
foundation_node:
@@ -499,6 +857,7 @@
mcp_version: { get_param: mcp_version }
instance_domain: {get_param: instance_domain}
instance_name: foundation
+ instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
network: { get_attr: [networks, network] }
underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
@@ -506,6 +865,39 @@
list_join:
- '.'
- [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '5' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ vsrx_node:
+ type: MCP::VsrxNode
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: vsrx
+ instance_flavor: {get_param: vsrx_flavor}
+ instance_image: system_vsrx-12.1X46-D20.5
+ network: { get_attr: [networks, network] }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '220' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '220' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '220' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
outputs:
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
new file mode 100644
index 0000000..213ca41
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
@@ -0,0 +1,368 @@
+default_context:
+ auditd_enabled: 'False'
+ backend_network_netmask: 255.255.255.0
+ backend_network_subnet: 10.167.4.0/24
+ backend_vlan: '10'
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
+ rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
+ 9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
+ qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
+ Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
+ 178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
+ d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
+ MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
+ 6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
+ sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
+ H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
+ EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
+ zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
+ fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
+ HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
+ x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
+ +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
+ UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
+ 7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
+ eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
+ mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
+ km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
+ 9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
+ OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
+ CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
+ bmk_enabled: 'False'
+ ceph_cluster_network: 10.167.4.0/24
+ ceph_enabled: 'True'
+ ceph_hyper_converged: 'False'
+ ceph_mon_node01_address: 10.167.4.66
+ ceph_mon_node01_hostname: cmn01
+ ceph_mon_node02_address: 10.167.4.67
+ ceph_mon_node02_hostname: cmn02
+ ceph_mon_node03_address: 10.167.4.68
+ ceph_mon_node03_hostname: cmn03
+ ceph_osd_backend: bluestore
+ ceph_osd_block_db_size: '3'
+ ceph_osd_bond_mode: active-backup
+ ceph_osd_data_partition_prefix: ""
+ ceph_osd_count: '3'
+ ceph_osd_data_disks: "/dev/vdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_node_count: '3'
+ ceph_osd_journal_size: '3'
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: eth1
+ ceph_osd_primary_second_nic: eth2
+ ceph_osd_single_address_ranges: 10.167.4.201-10.167.4.203
+ ceph_osd_deploy_address_ranges: 10.167.5.70-10.167.5.72
+ ceph_osd_backend_address_ranges: 10.167.6.201-10.167.6.203
+ ceph_osd_storage_address_ranges: 10.167.4.201-10.167.4.203
+ ceph_public_network_allocation: storage
+ ceph_osd_mode: "separated"
+ ceph_public_network: 10.167.4.0/24
+ ceph_rgw_address: 10.167.4.75
+ ceph_rgw_hostname: rgw
+ ceph_rgw_node01_address: 10.167.4.76
+ ceph_rgw_node01_hostname: rgw01
+ ceph_rgw_node02_address: 10.167.4.77
+ ceph_rgw_node02_hostname: rgw02
+ ceph_rgw_node03_address: 10.167.4.78
+ ceph_rgw_node03_hostname: rgw03
+ ceph_version: luminous
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: heat-cicd-pike-dvr-sl.local
+ cluster_name: heat-cicd-pike-dvr-sl
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: 'proposed'
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: test@mirantis.com
+ gainsight_service_enabled: 'False'
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ gnocchi_aggregation_storage: ceph
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 10.167.5.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 10.167.5.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 10.167.5.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kubernetes_ctl_on_kvm: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_deploy_network_name: deploy_network
+ maas_deploy_range_end: 10.167.5.230
+ maas_deploy_range_start: 10.167.5.20
+ maas_deploy_vlan: '0'
+ maas_enabled: 'False'
+ maas_fabric_name: deploy_fabric
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: 'proposed'
+ mcp_version: proposed
+ no_platform: 'False'
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.101-10.167.4.102
+ openstack_compute_deploy_address_ranges: 10.167.5.73-10.167.5.74
+ openstack_compute_tenant_address_ranges: 10.167.6.101-10.167.6.102
+ openstack_compute_backend_address_ranges: 10.167.6.101-10.167.6.102
+ openstack_control_address: 10.167.4.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.4.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.4.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.4.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.4.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.6.224
+ openstack_gateway_node02_address: 10.167.4.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.225
+ openstack_gateway_node03_address: 10.167.4.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.6.226
+ openstack_message_queue_address: 10.167.4.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.4.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.4.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.4.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ #openstack_proxy_address: 172.17.16.80 # external network endpoint
+ openstack_proxy_address: 10.167.4.80 # external network endpoint
+ openstack_proxy_vip_interface: ens5
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.4.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
+ salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+ sriov_network_subnet: 10.55.0.0/16
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_telemetry_enabled: 'False'
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ version: proposed
+ vnf_onboarding_enabled: 'False'
+ openstack_telemetry_address: 10.167.4.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.4.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.4.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.4.86
+ openstack_telemetry_node03_hostname: mdb03
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.96
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.97
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.98
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.99
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ openstack_create_public_network: 'True'
+ openstack_public_neutron_subnet_gateway: 10.9.0.1
+ openstack_public_neutron_subnet_cidr: 10.9.0.0/24
+ openstack_public_neutron_subnet_allocation_start: 10.9.0.201
+ openstack_public_neutron_subnet_allocation_end: 10.9.0.245
+
+ #openstack_public_neutron_subnet_gateway: 172.17.16.1
+ #openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ #openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ #openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'True'
+ barbican_integration_enabled: 'False'
+
+ openstack_barbican_address: 10.167.4.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 10.167.4.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 10.167.4.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 10.167.4.47
+ openstack_barbican_node03_hostname: kmn03
+
+ designate_backend: powerdns
+ designate_enabled: 'True'
+ openstack_dns_node01_address: 10.167.4.113
+ openstack_dns_node02_address: 10.167.4.114
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+ openstack_octavia_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+ cinder_backup_engine: 'ceph'
+ cinder_ceph_backup_pool_name: 'backups'
+ jenkins_pipelines_branch: 'release/2019.2.0'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-environment.yaml
similarity index 63%
copy from tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml
copy to tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-environment.yaml
index 025f4e7..76c4cf1 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-environment.yaml
@@ -1,395 +1,501 @@
-nodes:
- # Virtual Control Plane nodes
- cid01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ ens6:
+ role: single_external
+ external_address: 10.9.0.15
+ external_network_netmask: 255.255.255.0
- cmn01.cookied-bm-mcp-ocata-contrail.local:
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs01:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs02:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs03:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg01:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg02:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg03:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens6:
+ role: single_external
+ external_address: 10.9.0.121
+ external_network_netmask: 255.255.255.0
+
+ prx02:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens6:
+ role: single_external
+ external_address: 10.9.0.122
+ external_network_netmask: 255.255.255.0
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw02:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw03:
+ reclass_storage_name: openstack_gateway_node03
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn01:
reclass_storage_name: ceph_mon_node01
roles:
- ceph_mon
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- cmn02.cookied-bm-mcp-ocata-contrail.local:
+ cmn02:
reclass_storage_name: ceph_mon_node02
roles:
- ceph_mon
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- cmn03.cookied-bm-mcp-ocata-contrail.local:
+ cmn03:
reclass_storage_name: ceph_mon_node03
roles:
- ceph_mon
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- rgw01.cookied-bm-mcp-ocata-contrail.local:
+ rgw01:
reclass_storage_name: ceph_rgw_node01
roles:
- ceph_rgw
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- rgw02.cookied-bm-mcp-ocata-contrail.local:
+ rgw02:
reclass_storage_name: ceph_rgw_node02
roles:
- ceph_rgw
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- rgw03.cookied-bm-mcp-ocata-contrail.local:
+ rgw03:
reclass_storage_name: ceph_rgw_node03
roles:
- ceph_rgw
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
+ kmn01:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn02:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn03:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns01:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml
new file mode 100644
index 0000000..32ca5ce
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml
@@ -0,0 +1,14 @@
+{% set HOSTNAME_CFG01='cfg01.heat-cicd-pike-dvr-sl.local' %}
+{% set LAB_CONFIG_NAME='heat-cicd-pike-dvr-sl' %}
+{% set DOMAIN_NAME='heat-cicd-pike-dvr-sl.local' %}
+
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..1677dcd
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
@@ -0,0 +1,64 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ jenkins:qalab
+ expire: False
+
+packages:
+ - openjdk-8-jre-headless
+ - libyaml-dev
+ - libffi-dev
+ - libvirt-dev
+ - python-dev
+ - python-pip
+ - python-virtualenv
+ #- python-psycopg2
+ - pkg-config
+ - vlan
+ - bridge-utils
+ - ebtables
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay-userdata.yaml
similarity index 70%
rename from tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
rename to tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay-userdata.yaml
index 5a4fc79..567a445 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay-userdata.yaml
@@ -1,40 +1,33 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
- ssh_pwauth: True
- users:
+ssh_pwauth: True
+users:
- name: root
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
- disable_root: false
- chpasswd:
+disable_root: false
+chpasswd:
list: |
root:r00tme
expire: False
-
- bootcmd:
+bootcmd:
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
- output:
+output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
- runcmd:
+runcmd:
- if lvs vg0; then pvresize /dev/vda3; fi
- if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
- export TERM=linux
- export LANG=C
# Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
-
+ #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
# Enable grub menu using updated config below
- update-grub
@@ -49,7 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- write_files:
+write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
GRUB_RECORDFAIL_TIMEOUT=30
@@ -64,20 +57,20 @@
- path: /usr/share/growlvm/image-layout.yml
content: |
root:
- size: '30%VG'
+ size: '65%VG'
home:
- size: '1G'
+ size: '1%VG'
var_log:
- size: '11%VG'
+ size: '10%VG'
var_log_audit:
- size: '5G'
+ size: '5%VG'
var_tmp:
- size: '11%VG'
+ size: '10%VG'
tmp:
- size: '5G'
+ size: '5%VG'
owner: root:root
- growpart:
+growpart:
mode: auto
devices:
- '/'
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
new file mode 100644
index 0000000..be3f68b
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
@@ -0,0 +1,978 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for heat-cicd-pike-dvr-sl
+
+parameters:
+ instance_domain:
+ type: string
+ default: heat-cicd-pike-dvr-sl.local
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+ control_subnet_cidr:
+ type: string
+ management_subnet_cidr:
+ type: string
+ management_subnet_pool_start:
+ type: string
+ management_subnet_pool_end:
+ type: string
+ management_subnet_cfg01_ip:
+ type: string
+ management_subnet_gateway_ip:
+ type: string
+
+ key_pair:
+ type: string
+
+ ctl_flavor:
+ type: string
+ cfg_flavor:
+ type: string
+ cid_flavor:
+ type: string
+ kvm_fake_flavor:
+ type: string
+ dbs_flavor:
+ type: string
+ msg_flavor:
+ type: string
+ mon_flavor:
+ type: string
+ log_flavor:
+ type: string
+ mtr_flavor:
+ type: string
+ cmp_flavor:
+ type: string
+ foundation_flavor:
+ type: string
+ cmn_flavor:
+ type: string
+ rgw_flavor:
+ type: string
+ osd_flavor:
+ type: string
+ gtw_flavor:
+ type: string
+ dns_flavor:
+ type: string
+ kmn_flavor:
+ type: string
+ prx_flavor:
+ type: string
+
+ net_public:
+ type: string
+
+ foundation_image:
+ type: string
+
+resources:
+ networks:
+ type: MCP::Networks
+ properties:
+ stack_name: { get_param: "OS::stack_name" }
+ env_name: { get_param: env_name }
+
+ #flavors:
+ # type: MCP::Flavors
+
+ cfg01_node:
+ type: MCP::MasterNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ cfg01_flavor: { get_param: cfg_flavor }
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '15' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '15' ]
+ instance_name: cfg01
+ instance_domain: {get_param: instance_domain}
+ network: { get_attr: [networks, network] }
+
+ control_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: ctl01
+ instance02_name: ctl02
+ instance03_name: ctl03
+ instance_flavor: {get_param: ctl_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '11' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '12' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '13' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '11' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '12' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '13' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '11' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '12' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '13' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_database_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: dbs01
+ instance02_name: dbs02
+ instance03_name: dbs03
+ instance_flavor: {get_param: dbs_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '51' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '52' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '53' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '51' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '52' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '53' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '51' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '52' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '53' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ fake_kvm_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kvm01
+ instance02_name: kvm02
+ instance03_name: kvm03
+ instance_flavor: {get_param: kvm_fake_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '241' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '242' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '243' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '241' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '242' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '243' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '241' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '242' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '243' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_message_queue_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_database_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: msg01
+ instance02_name: msg02
+ instance03_name: msg03
+ instance_flavor: {get_param: msg_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '41' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '42' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '43' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '41' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '42' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '43' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '41' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '42' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '43' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cicd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cid01
+ instance02_name: cid02
+ instance03_name: cid03
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '91' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '92' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '93' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '91' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '92' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '93' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '91' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '92' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '93' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_monitor_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_message_queue_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mon01
+ instance02_name: mon02
+ instance03_name: mon03
+ instance_flavor: {get_param: mon_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '71' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '72' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '73' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '71' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '72' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '73' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '71' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '72' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '73' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_log_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_monitor_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: log01
+ instance02_name: log02
+ instance03_name: log03
+ instance_flavor: {get_param: log_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '61' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '62' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '63' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '61' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '62' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '63' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '61' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '62' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '63' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_mtr_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_log_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mtr01
+ instance02_name: mtr02
+ instance03_name: mtr03
+ instance_flavor: {get_param: mtr_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '97' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '98' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '99' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '97' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '98' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '99' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '97' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '98' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '99' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx01
+ instance_flavor: {get_param: prx_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '81' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '81' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '81' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx02_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx02
+ instance_flavor: {get_param: prx_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '82' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '82' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '82' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp001_virtual:
+ type: MCP::Compute
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp001
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '101' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '101' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp002_virtual:
+ type: MCP::Compute
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp002
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '102' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '102' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ foundation_node:
+ type: MCP::FoundationNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: foundation
+ instance_image: { get_param: foundation_image }
+ instance_flavor: {get_param: foundation_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '5' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ ceph_cmn_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cmn01
+ instance02_name: cmn02
+ instance03_name: cmn03
+ instance_flavor: {get_param: cmn_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '66' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '67' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '68' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '66' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '67' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '68' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '66' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '67' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '68' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ ceph_rgw_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: rgw01
+ instance02_name: rgw02
+ instance03_name: rgw03
+ instance_flavor: {get_param: rgw_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '76' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '77' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '78' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '76' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '77' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '78' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '76' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '77' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '78' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ ceph_osd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: osd001
+ instance02_name: osd002
+ instance03_name: osd003
+ instance_flavor: {get_param: osd_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '201' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '202' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '203' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '201' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '202' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '203' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '201' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '202' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '203' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_gtw_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: gtw01
+ instance02_name: gtw02
+ instance03_name: gtw03
+ instance_flavor: {get_param: gtw_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '224' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '225' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '226' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '224' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '225' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '226' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '224' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '225' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '226' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_barbican_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kmn01
+ instance02_name: kmn02
+ instance03_name: kmn03
+ instance_flavor: {get_param: kmn_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '45' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '46' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '47' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '45' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '46' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '47' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '45' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '46' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '47' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ dns01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: dns01
+ instance_flavor: {get_param: dns_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '113' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '113' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '113' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ dns02_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: dns02
+ instance_flavor: {get_param: dns_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '114' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '114' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '114' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+outputs:
+
+ control_subnet_cidr:
+ description: Control network CIDR
+ value: { get_param: control_subnet_cidr }
+
+ management_subnet_cidr:
+ description: Admin network CIDR
+ value: { get_param: management_subnet_cidr }
+
+ foundation_floating:
+ description: foundation node IP address (floating) from external network
+ value:
+ get_attr:
+ - foundation_node
+ - instance_floating_address
+...
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
new file mode 100644
index 0000000..b05f2fd
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -0,0 +1,369 @@
+default_context:
+ auditd_enabled: 'False'
+ backend_network_netmask: 255.255.255.0
+ backend_network_subnet: 10.167.4.0/24
+ backend_vlan: '10'
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
+ rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
+ 9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
+ qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
+ Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
+ 178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
+ d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
+ MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
+ 6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
+ sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
+ H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
+ EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
+ zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
+ fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
+ HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
+ x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
+ +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
+ UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
+ 7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
+ eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
+ mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
+ km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
+ 9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
+ OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
+ CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
+ bmk_enabled: 'False'
+ ceph_cluster_network: 10.167.4.0/24
+ ceph_enabled: 'True'
+ ceph_hyper_converged: 'False'
+ ceph_mon_node01_address: 10.167.4.66
+ ceph_mon_node01_hostname: cmn01
+ ceph_mon_node02_address: 10.167.4.67
+ ceph_mon_node02_hostname: cmn02
+ ceph_mon_node03_address: 10.167.4.68
+ ceph_mon_node03_hostname: cmn03
+ ceph_osd_backend: bluestore
+ ceph_osd_block_db_size: '3'
+ ceph_osd_data_partition_prefix: ""
+ ceph_osd_bond_mode: active-backup
+ ceph_osd_count: '3'
+ ceph_osd_data_disks: "/dev/vdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_node_count: '3'
+ ceph_osd_journal_size: '3'
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: eth1
+ ceph_osd_primary_second_nic: eth2
+ ceph_osd_single_address_ranges: 10.167.4.201-10.167.4.203
+ ceph_osd_deploy_address_ranges: 10.167.5.70-10.167.5.72
+ ceph_osd_backend_address_ranges: 10.167.6.201-10.167.6.203
+ ceph_osd_storage_address_ranges: 10.167.4.201-10.167.4.203
+ ceph_public_network: 10.167.4.0/24
+ ceph_public_network_allocation: storage
+ ceph_rgw_address: 10.167.4.75
+ ceph_rgw_hostname: rgw
+ ceph_rgw_node01_address: 10.167.4.76
+ ceph_rgw_node01_hostname: rgw01
+ ceph_rgw_node02_address: 10.167.4.77
+ ceph_rgw_node02_hostname: rgw02
+ ceph_rgw_node03_address: 10.167.4.78
+ ceph_rgw_node03_hostname: rgw03
+ ceph_version: luminous
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: heat-cicd-queens-dvr-sl.local
+ cluster_name: heat-cicd-queens-dvr-sl
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: 'proposed'
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: test@mirantis.com
+ gainsight_service_enabled: 'False'
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ gnocchi_aggregation_storage: ceph
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 10.167.5.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 10.167.5.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 10.167.5.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kubernetes_ctl_on_kvm: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_deploy_network_name: deploy_network
+ maas_deploy_range_end: 10.167.5.230
+ maas_deploy_range_start: 10.167.5.20
+ maas_deploy_vlan: '0'
+ maas_enabled: 'False'
+ maas_fabric_name: deploy_fabric
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: 'proposed'
+ mcp_version: proposed
+ no_platform: 'False'
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.101-10.167.4.102
+ openstack_compute_deploy_address_ranges: 10.167.5.73-10.167.5.74
+ openstack_compute_tenant_address_ranges: 10.167.6.101-10.167.6.102
+ openstack_compute_backend_address_ranges: 10.167.6.101-10.167.6.102
+ openstack_control_address: 10.167.4.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.4.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.4.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.4.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.4.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.6.224
+ openstack_gateway_node02_address: 10.167.4.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.225
+ openstack_gateway_node03_address: 10.167.4.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.6.226
+ openstack_message_queue_address: 10.167.4.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.4.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.4.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.4.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ #openstack_proxy_address: 172.17.16.80 # external network endpoint
+ openstack_proxy_address: 10.167.4.80 # external network endpoint
+ openstack_proxy_vip_interface: ens5
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.4.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
+ salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: 'proposed'
+ shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+ sriov_network_subnet: 10.55.0.0/16
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_telemetry_enabled: 'False'
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ version: proposed
+ vnf_onboarding_enabled: 'False'
+ openstack_telemetry_address: 10.167.4.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.4.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.4.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.4.86
+ openstack_telemetry_node03_hostname: mdb03
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.96
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.97
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.98
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.99
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'https'
+ openstack_create_public_network: 'True'
+
+ openstack_public_neutron_subnet_gateway: 10.9.0.1
+ openstack_public_neutron_subnet_cidr: 10.9.0.0/24
+ openstack_public_neutron_subnet_allocation_start: 10.9.0.201
+ openstack_public_neutron_subnet_allocation_end: 10.9.0.245
+
+ #openstack_public_neutron_subnet_gateway: 172.17.16.1
+ #openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ #openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ #openstack_public_neutron_subnet_allocation_end: 172.17.16.245
+ manila_enabled: 'False'
+ barbican_enabled: 'True'
+ barbican_integration_enabled: 'False'
+
+ openstack_barbican_address: 10.167.4.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 10.167.4.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 10.167.4.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 10.167.4.47
+ openstack_barbican_node03_hostname: kmn03
+
+ designate_backend: powerdns
+ designate_enabled: 'True'
+ openstack_dns_node01_address: 10.167.4.113
+ openstack_dns_node02_address: 10.167.4.114
+ octavia_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+ OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+ qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+ 6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+ YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+ 2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+ ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+ NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+ vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+ SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+ ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+ fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+ aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+ 7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+ 8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+ cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+ ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+ aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+ d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+ QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+ 780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+ lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+ EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+ hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+ 2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+ -----END RSA PRIVATE KEY-----
+ octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+ openstack_octavia_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
+ octavia_hm_bind_ip: 192.168.1.12
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+ cinder_backup_engine: 'ceph'
+ cinder_ceph_backup_pool_name: 'backups'
+ jenkins_pipelines_branch: 'release/2019.2.0'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-environment.yaml
similarity index 63%
rename from tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml
rename to tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-environment.yaml
index 025f4e7..6b3ca8e 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas-2018.8.0/salt-context-environment.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-environment.yaml
@@ -1,395 +1,500 @@
-nodes:
- # Virtual Control Plane nodes
- cid01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- cid03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- dbs03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- msg03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mon03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- mtr03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
-
- log03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens2:
- role: single_dhcp
- ens3:
- role: single_ctl
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+ ens6:
+ role: single_external
+ external_address: 10.9.0.15
+ external_network_netmask: 255.255.255.0
- cmn01.cookied-bm-mcp-ocata-contrail.local:
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs01:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs02:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs03:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg01:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg02:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg03:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens6:
+ role: single_external
+ external_address: 10.9.0.121
+ external_network_netmask: 255.255.255.0
+
+ prx02:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens6:
+ role: single_external
+ external_address: 10.9.0.122
+ external_network_netmask: 255.255.255.0
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw02:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw03:
+ reclass_storage_name: openstack_gateway_node03
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn01:
reclass_storage_name: ceph_mon_node01
roles:
- ceph_mon
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- cmn02.cookied-bm-mcp-ocata-contrail.local:
+ cmn02:
reclass_storage_name: ceph_mon_node02
roles:
- ceph_mon
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- cmn03.cookied-bm-mcp-ocata-contrail.local:
+ cmn03:
reclass_storage_name: ceph_mon_node03
roles:
- ceph_mon
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- rgw01.cookied-bm-mcp-ocata-contrail.local:
+ rgw01:
reclass_storage_name: ceph_rgw_node01
roles:
- ceph_rgw
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- rgw02.cookied-bm-mcp-ocata-contrail.local:
+ rgw02:
reclass_storage_name: ceph_rgw_node02
roles:
- ceph_rgw
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
- rgw03.cookied-bm-mcp-ocata-contrail.local:
+ rgw03:
reclass_storage_name: ceph_rgw_node03
roles:
- ceph_rgw
- linux_system_codename_xenial
interfaces:
- ens2:
- role: single_dhcp
ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
-# bmk01.cookied-bm-mcp-ocata-contrail.local:
-# reclass_storage_name: openstack_benchmark_node01
-# roles:
-# - openstack_benchmark
-# - linux_system_codename_xenial
-# interfaces:
-# ens3:
-# role: single_ctl
+ kmn01:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn02:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kmn03:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns01:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
new file mode 100644
index 0000000..e02121f
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
@@ -0,0 +1,14 @@
+{% set HOSTNAME_CFG01='cfg01.heat-cicd-queens-dvr-sl.local' %}
+{% set LAB_CONFIG_NAME='heat-cicd-queens-dvr-sl' %}
+{% set DOMAIN_NAME='heat-cicd-queens-dvr-sl.local' %}
+
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..1677dcd
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
@@ -0,0 +1,64 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ jenkins:qalab
+ expire: False
+
+packages:
+ - openjdk-8-jre-headless
+ - libyaml-dev
+ - libffi-dev
+ - libvirt-dev
+ - python-dev
+ - python-pip
+ - python-virtualenv
+ #- python-psycopg2
+ - pkg-config
+ - vlan
+ - bridge-utils
+ - ebtables
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay-userdata.yaml
similarity index 70%
copy from tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
copy to tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay-userdata.yaml
index 5a4fc79..567a445 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay-userdata.yaml
@@ -1,40 +1,33 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
- ssh_pwauth: True
- users:
+ssh_pwauth: True
+users:
- name: root
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
- disable_root: false
- chpasswd:
+disable_root: false
+chpasswd:
list: |
root:r00tme
expire: False
-
- bootcmd:
+bootcmd:
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
- output:
+output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
- runcmd:
+runcmd:
- if lvs vg0; then pvresize /dev/vda3; fi
- if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
- export TERM=linux
- export LANG=C
# Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
-
+ #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
# Enable grub menu using updated config below
- update-grub
@@ -49,7 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- write_files:
+write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
GRUB_RECORDFAIL_TIMEOUT=30
@@ -64,20 +57,20 @@
- path: /usr/share/growlvm/image-layout.yml
content: |
root:
- size: '30%VG'
+ size: '65%VG'
home:
- size: '1G'
+ size: '1%VG'
var_log:
- size: '11%VG'
+ size: '10%VG'
var_log_audit:
- size: '5G'
+ size: '5%VG'
var_tmp:
- size: '11%VG'
+ size: '10%VG'
tmp:
- size: '5G'
+ size: '5%VG'
owner: root:root
- growpart:
+growpart:
mode: auto
devices:
- '/'
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
new file mode 100644
index 0000000..bc0fa6d
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
@@ -0,0 +1,954 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for heat-cicd-queens-dvr-sl
+
+parameters:
+ instance_domain:
+ type: string
+ default: heat-cicd-queens-dvr-sl.local
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+ control_subnet_cidr:
+ type: string
+ management_subnet_cidr:
+ type: string
+ management_subnet_pool_start:
+ type: string
+ management_subnet_pool_end:
+ type: string
+ management_subnet_cfg01_ip:
+ type: string
+ management_subnet_gateway_ip:
+ type: string
+
+ key_pair:
+ type: string
+
+ ctl_flavor:
+ type: string
+ cfg_flavor:
+ type: string
+ cid_flavor:
+ type: string
+ kvm_fake_flavor:
+ type: string
+ dbs_flavor:
+ type: string
+ msg_flavor:
+ type: string
+ mon_flavor:
+ type: string
+ log_flavor:
+ type: string
+ mtr_flavor:
+ type: string
+ cmp_flavor:
+ type: string
+ foundation_flavor:
+ type: string
+ cmn_flavor:
+ type: string
+ rgw_flavor:
+ type: string
+ osd_flavor:
+ type: string
+ gtw_flavor:
+ type: string
+ dns_flavor:
+ type: string
+ kmn_flavor:
+ type: string
+ prx_flavor:
+ type: string
+
+ net_public:
+ type: string
+
+ foundation_image:
+ type: string
+
+resources:
+ networks:
+ type: MCP::Networks
+ properties:
+ stack_name: { get_param: "OS::stack_name" }
+ env_name: { get_param: env_name }
+
+ #flavors:
+ # type: MCP::Flavors
+
+ cfg01_node:
+ type: MCP::MasterNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ cfg01_flavor: { get_param: cfg_flavor }
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '15' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '15' ]
+ instance_name: cfg01
+ instance_domain: {get_param: instance_domain}
+ network: { get_attr: [networks, network] }
+
+ control_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: ctl01
+ instance02_name: ctl02
+ instance03_name: ctl03
+ instance_flavor: {get_param: ctl_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '11' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '12' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '13' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '11' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '12' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '13' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '11' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '12' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '13' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_database_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: dbs01
+ instance02_name: dbs02
+ instance03_name: dbs03
+ instance_flavor: {get_param: dbs_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '51' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '52' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '53' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '51' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '52' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '53' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '51' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '52' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '53' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ fake_kvm_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kvm01
+ instance02_name: kvm02
+ instance03_name: kvm03
+ instance_flavor: {get_param: kvm_fake_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '241' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '242' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '243' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '241' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '242' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '243' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '241' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '242' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '243' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_message_queue_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_database_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: msg01
+ instance02_name: msg02
+ instance03_name: msg03
+ instance_flavor: {get_param: msg_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '41' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '42' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '43' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '41' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '42' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '43' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '41' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '42' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '43' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cicd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cid01
+ instance02_name: cid02
+ instance03_name: cid03
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '91' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '92' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '93' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '91' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '92' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '93' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '91' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '92' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '93' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_monitor_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_message_queue_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mon01
+ instance02_name: mon02
+ instance03_name: mon03
+ instance_flavor: {get_param: mon_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '71' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '72' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '73' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '71' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '72' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '73' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '71' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '72' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '73' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_log_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_monitor_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: log01
+ instance02_name: log02
+ instance03_name: log03
+ instance_flavor: {get_param: log_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '61' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '62' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '63' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '61' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '62' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '63' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '61' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '62' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '63' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_mtr_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_log_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mtr01
+ instance02_name: mtr02
+ instance03_name: mtr03
+ instance_flavor: {get_param: mtr_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '97' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '98' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '99' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '97' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '98' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '99' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '97' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '98' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '99' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx01
+ instance_flavor: {get_param: prx_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '81' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '81' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '81' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx02_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx02
+ instance_flavor: {get_param: prx_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '82' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '82' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '82' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp001_virtual:
+ type: MCP::Compute
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp001
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '101' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '101' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp002_virtual:
+ type: MCP::Compute
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp002
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '102' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '102' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ foundation_node:
+ type: MCP::FoundationNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: foundation
+ instance_image: { get_param: foundation_image }
+ instance_flavor: {get_param: foundation_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '5' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ ceph_cmn_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cmn01
+ instance02_name: cmn02
+ instance03_name: cmn03
+ instance_flavor: {get_param: cmn_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '66' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '67' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '68' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '66' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '67' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '68' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '66' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '67' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '68' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ ceph_rgw_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: rgw01
+ instance02_name: rgw02
+ instance03_name: rgw03
+ instance_flavor: {get_param: rgw_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '76' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '77' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '78' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '76' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '77' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '78' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '76' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '77' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '78' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ ceph_osd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: osd001
+ instance02_name: osd002
+ instance03_name: osd003
+ instance_flavor: {get_param: osd_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '201' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '202' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '203' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '201' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '202' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '203' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '201' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '202' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '203' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_gtw_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: gtw01
+ instance02_name: gtw02
+ instance03_name: gtw03
+ instance_flavor: {get_param: gtw_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '224' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '225' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '226' ]
+ instance01_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '224' ]
+ instance02_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '225' ]
+ instance03_tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '226' ]
+ instance01_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '224' ]
+ instance02_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '225' ]
+ instance03_external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '226' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_barbican_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kmn01
+ instance02_name: kmn02
+ instance03_name: kmn03
+ instance_flavor: {get_param: kmn_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '45' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '46' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '47' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ dns01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: dns01
+ instance_flavor: {get_param: dns_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '113' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '113' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '113' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ dns02_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: dns02
+ instance_flavor: {get_param: dns_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '114' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '114' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '114' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+outputs:
+
+ control_subnet_cidr:
+ description: Control network CIDR
+ value: { get_param: control_subnet_cidr }
+
+ management_subnet_cidr:
+ description: Admin network CIDR
+ value: { get_param: management_subnet_cidr }
+
+ foundation_floating:
+ description: foundation node IP address (floating) from external network
+ value:
+ get_attr:
+ - foundation_node
+ - instance_floating_address
+...
diff --git a/tcp_tests/templates/k8s-ha-contrail/core.yaml b/tcp_tests/templates/k8s-ha-contrail/core.yaml
deleted file mode 100644
index e5eb9d7..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/core.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-{% from 'k8s-ha-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check docker
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@docker:host' cmd.run 'docker ps'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keepalived on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
diff --git a/tcp_tests/templates/k8s-ha-contrail/juniper.conf b/tcp_tests/templates/k8s-ha-contrail/juniper.conf
deleted file mode 100644
index 398a257..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/juniper.conf
+++ /dev/null
@@ -1,110 +0,0 @@
-## Last commit: 2017-05-18 08:39:52 UTC by root
-version 12.1X46-D20.5;
-system {
- host-name vsrx1;
- root-authentication {
- encrypted-password "$1$gpbfk/Jr$lF2foqHYBd/Sp56dlmkXH1"; ## SECRET-DATA
- }
- name-server {
- 8.8.8.8;
- 8.8.4.4;
- }
- services {
- ssh;
- web-management {
- http {
- interface ge-0/0/0.0;
- }
- }
- }
- syslog {
- file messages {
- any any;
- }
- }
- license {
- autoupdate {
- url https://ae1.juniper.net/junos/key_retrieval;
- }
- }
- ntp {
- peer 46.243.48.4;
- peer 147.251.48.140;
- peer 46.243.48.88;
- }
-}
-interfaces {
- ge-0/0/0 {
- unit 0 {
- family inet {
- address 172.16.10.90/24;
- }
- }
- }
- ge-0/0/1 {
- unit 0 {
- family inet {
- address 192.168.10.90/24;
- }
- }
- }
- ge-0/0/2 {
- unit 0 {
- family inet {
- address 10.70.0.91/24;
- }
- }
- }
-}
-routing-options {
- route-distinguisher-id 172.16.10.90;
- autonomous-system 64512;
- dynamic-tunnels {
- dynamic_overlay_tunnels {
- source-address 172.16.10.90;
- gre;
- destination-networks {
- 172.16.10.0/24;
- }
- }
- }
-}
-protocols {
- mpls {
- interface all;
- }
- bgp {
- group Contrail_Controller {
- type internal;
- local-address 172.16.10.90;
- keep all;
- family inet-vpn {
- unicast;
- }
- allow 172.16.10.0/24;
- }
- }
-}
-security {
- forwarding-options {
- family {
- mpls {
- mode packet-based;
- }
- }
- }
-}
-routing-instances {
- public {
- instance-type vrf;
- interface ge-0/0/1.0;
- vrf-target target:64512:10000;
- vrf-table-label;
- routing-options {
- static {
- route 192.168.10.0/24 discard;
- route 0.0.0.0/0 next-hop 192.168.10.1;
- }
- }
- }
-}
diff --git a/tcp_tests/templates/k8s-ha-contrail/juniper.conf.template b/tcp_tests/templates/k8s-ha-contrail/juniper.conf.template
deleted file mode 100644
index e7eed4a..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/juniper.conf.template
+++ /dev/null
@@ -1,110 +0,0 @@
-## Last commit: 2017-05-18 08:39:52 UTC by root
-version 12.1X46-D20.5;
-system {
- host-name vsrx1;
- root-authentication {
- encrypted-password "$1$gpbfk/Jr$lF2foqHYBd/Sp56dlmkXH1"; ## SECRET-DATA
- }
- name-server {
- 8.8.8.8;
- 8.8.4.4;
- }
- services {
- ssh;
- web-management {
- http {
- interface ge-0/0/0.0;
- }
- }
- }
- syslog {
- file messages {
- any any;
- }
- }
- license {
- autoupdate {
- url https://ae1.juniper.net/junos/key_retrieval;
- }
- }
- ntp {
- peer 46.243.48.4;
- peer 147.251.48.140;
- peer 46.243.48.88;
- }
-}
-interfaces {
- ge-0/0/0 {
- unit 0 {
- family inet {
- address {{ private_address }}/24;
- }
- }
- }
- ge-0/0/1 {
- unit 0 {
- family inet {
- address {{ public_address }}/24;
- }
- }
- }
- ge-0/0/2 {
- unit 0 {
- family inet {
- address {{ admin_address }};
- }
- }
- }
-}
-routing-options {
- route-distinguisher-id {{ private_address }};
- autonomous-system 64512;
- dynamic-tunnels {
- dynamic_overlay_tunnels {
- source-address {{ private_address }};
- gre;
- destination-networks {
- {{ private_network }}/24;
- }
- }
- }
-}
-protocols {
- mpls {
- interface all;
- }
- bgp {
- group Contrail_Controller {
- type internal;
- local-address 172.16.10.90;
- keep all;
- family inet-vpn {
- unicast;
- }
- allow {{ private_network }}/24;
- }
- }
-}
-security {
- forwarding-options {
- family {
- mpls {
- mode packet-based;
- }
- }
- }
-}
-routing-instances {
- public {
- instance-type vrf;
- interface ge-0/0/1.0;
- vrf-target target:64512:10000;
- vrf-table-label;
- routing-options {
- static {
- route {{ public_network }} discard;
- route 0.0.0.0/0 next-hop {{ public_network_gateway }};
- }
- }
- }
-}
diff --git a/tcp_tests/templates/k8s-ha-contrail/k8s.yaml b/tcp_tests/templates/k8s-ha-contrail/k8s.yaml
deleted file mode 100644
index 932cc5b..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/k8s.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
-{% from 'k8s-ha-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Install etcd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' state.sls etcd.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the etcd health
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- # Opencontrail Control Plane
-
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail db on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install Opencontrail control on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail control on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Wake up vhost0
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-;
- nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail client on computes
- cmd: sleep 300 && salt --timeout=60 --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes #2
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-# Kubernetes
-- description: Install Kubernetes Addons
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install Kubernetes components
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' state.sls kubernetes.pool
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 60}
- skip_fail: false
-
-# NOTE(vryzhenkin): There is nothing to setup at this model
-#- description: Setup etcd server on primary controller
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@kubernetes:master and *01*' state.sls etcd.server.setup
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Run Kubernetes master without setup
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: true
-
-- description: Run Kubernetes master setup
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Restart Kubelet
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' service.restart 'kubelet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Renew hosts file on a whole cluster
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/k8s-ha-contrail/salt.yaml b/tcp_tests/templates/k8s-ha-contrail/salt.yaml
deleted file mode 100644
index 086dc83..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/salt.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-{% from 'k8s-ha-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'k8s-ha-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'k8s-ha-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=true) }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.REGISTER_COMPUTE_NODES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--meta-data.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 6076ffa..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifdown ens4
- - sudo ifdown ens5
- - sudo ifup ens4
- - sudo ifup ens5
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && eatmydata apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1404.yaml
deleted file mode 100644
index c56cc3b..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup eth1
- - sudo ifup eth2
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## tcp cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - sudo add-apt-repository universe
- - echo "deb [arch=amd64] http://apt.mirantis.com/trusty {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }} trusty main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - eatmydata apt-get clean
- - eatmydata apt-get update && eatmydata apt-get -y upgrade
-
- # install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
- - eatmydata apt-get -y install --install-recommends linux-generic-lts-xenial
- - reboot
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
- auto eth1
- iface eth1 inet dhcp
- auto eth2
- iface eth2 inet dhcp
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml
deleted file mode 100644
index d23efd2..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
-
- # Create swap disabled due k8s >1.8.x allows use swap only with
- # flag `--fail-swap-on=false` in kubelet args
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
deleted file mode 100644
index b87f888..0000000
--- a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
+++ /dev/null
@@ -1,446 +0,0 @@
-# This environment requires 50.5 GB of RAM and 270GB of Storage. Run with caution.
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'k8s-ha-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'k8s-ha-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'k8s-ha-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'k8s-ha-contrail/underlay--user-data1404.yaml' as CLOUDINIT_USER_DATA_1404 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1404 {{ CLOUDINIT_USER_DATA_1404 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'k8s-ha-contrail') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp0.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW01 = os_env('HOSTNAME_NTW01', 'ntw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW02 = os_env('HOSTNAME_NTW02', 'ntw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW03 = os_env('HOSTNAME_NTW03', 'ntw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_VSRX01 = os_env('HOSTNAME_VSRX01', 'vsrx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'k8s-ha-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
-
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '172.16.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_NTW01 }}: +110
- default_{{ HOSTNAME_NTW02 }}: +111
- default_{{ HOSTNAME_NTW03 }}: +112
- default_{{ HOSTNAME_VSRX01 }}: +90
- ip_ranges:
- dhcp: [+90, -10]
-
- public-pool01:
- net: {{ os_env('PUBLIC_ADDRESS_POOL01', '192.168.10.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_NTW01 }}: +110
- default_{{ HOSTNAME_NTW02 }}: +111
- default_{{ HOSTNAME_NTW03 }}: +112
- default_{{ HOSTNAME_VSRX01 }}: +90
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_NTW01 }}: +110
- default_{{ HOSTNAME_NTW02 }}: +111
- default_{{ HOSTNAME_NTW03 }}: +112
- default_{{ HOSTNAME_VSRX01 }}: +90
- ip_ranges:
- dhcp: [+10, -10]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: True
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- private: private-pool01
- public: public-pool01
- admin: admin-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: true
-
- public:
- address_pool: public-pool01
- dhcp: true
- forward:
- mode: nat
-
- admin:
- address_pool: admin-pool01
- dhcp: true
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-
- - name: cloudimage1404
- source_image: !os_env IMAGE_PATH1404
- format: qcow2
-
- - name: vsrx_image
- source_image: !os_env IMAGE_VSRX
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: public
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: admin
- interface_model: *interface_model
-
- network_config:
- ens3:
- networks:
- - public
- ens4:
- networks:
- - private
- ens5:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CTL01 }}
- role: k8s_controller
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: public
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: admin
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - public
- ens4:
- networks:
- - private
- ens5:
- networks:
- - admin
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_NTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_NTW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_NTW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 20
- backing_store: cloudimage1404
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_VSRX01 }}
- role: vsrx
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 512
- boot:
- - hd
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 10
- backing_store: vsrx_image
- format: qcow2
- #- name: iso
- #- capacity: 1
- #- format: raw
- #- device: cdrom
- #- bus: ide
- #- cloudinit_user_data: !include juniper.conf
-
- interfaces:
- - label: ge-0/0/0
- l2_network_device: private
- interface_model: *interface_model
- mac_address: 52:54:00:4e:b4:36
- - label: ge-0/0/1
- l2_network_device: public
- interface_model: *interface_model
- mac_address: 52:54:00:e1:44:9d
- - label: ge-0/0/2
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: 52:54:00:72:08:77
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index e19b15c..904a562 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -1,6 +1,7 @@
{# Collection of common macroses shared across different deployments #}
{% set SALT_MODELS_BRANCH = os_env('SALT_MODELS_BRANCH','master') %}
+{% set JENKINS_PIPELINE_BRANCH = os_env('JENKINS_PIPELINE_BRANCH','') %}
{% set SALT_MODELS_COMMIT = os_env('SALT_MODELS_COMMIT','master') %}
{# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #}
{% set SALT_MODELS_REF_CHANGE = os_env('SALT_MODELS_REF_CHANGE', '') %}
@@ -20,8 +21,11 @@
# Currently we support 2 salt version that can be set over bellow var
{% set SALT_VERSION = os_env('SALT_VERSION','2017.7') %}
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set UPDATE_REPO_CUSTOM_TAG = os_env('UPDATE_REPO_CUSTOM_TAG', '') %}
+{% set UPDATE_VERSION = os_env('UPDATE_VERSION', 'proposed') %}
{# set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') #}
{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas"+"/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
+{% set UPDATE_FORMULA_REPOSITORY = os_env('UPDATE_FORMULA_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/update/" + UPDATE_VERSION + "/salt-formulas"+"/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{# set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') #}
{% set FORMULA_GPG = os_env('FORMULA_GPG', "http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas/xenial/archive-salt-formulas.key") %}
{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
@@ -34,7 +38,6 @@
{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
{% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
-{% set UPDATE_REPO_CUSTOM_TAG = os_env('UPDATE_REPO_CUSTOM_TAG', '') %}
{# Address pools for reclass cluster model are taken in the following order:
# 1. environment variables,
@@ -89,6 +92,25 @@
{%- endmacro %}
+{%- macro MACRO_INSTALL_FORMULAS_FROM_UPDATE() %}
+{#####################################################}
+
+- description: 'Configure key on nodes and install packages'
+ cmd: |
+ rm -rf trusted* ;
+ rm -rf /etc/apt/sources.list ;
+ . /etc/lsb-release; # Get DISTRIB_CODENAME variable
+ echo "{{ UPDATE_FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_update_salt.list;
+ wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
+ eatmydata apt-get clean;
+ apt-get update;
+ sync;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{%- endmacro %}
+
{%- macro MACRO_INSTALL_SALT_MASTER() %}
{######################################}
- description: Installing salt master on cfg01
@@ -401,6 +423,9 @@
{%- elif SALT_MODELS_SYSTEM_TAG != '' %}
echo "SALT_MODELS_SYSTEM_TAG={{ SALT_MODELS_SYSTEM_TAG }}"
{%- endif %}
+ {%- if JENKINS_PIPELINE_BRANCH != '' %}
+ echo "JENKINS_PIPELINE_BRANCH={{ JENKINS_PIPELINE_BRANCH }}"
+ {%- endif %}
echo "======================================="
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
@@ -459,6 +484,9 @@
sed -i 's/cluster_name: .*/cluster_name: {{ CLUSTER_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ {%- if JENKINS_PIPELINE_BRANCH != '' %}
+ sed -i 's/jenkins_pipelines_branch: .*/jenkins_pipelines_branch: release\/proposed\/{{ REPOSITORY_SUITE }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ {%- endif %}
{%- if CONTROL_VLAN %}
sed -i 's/control_vlan: .*/control_vlan: {{ CONTROL_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
{%- endif %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
deleted file mode 100644
index 9e0598d..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: virtual-mcp-pike-dvr.local
- cluster_name: virtual-mcp-pike-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '100'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml
deleted file mode 100644
index 0127547..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml
+++ /dev/null
@@ -1,165 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
deleted file mode 100644
index bcbfec4..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
+++ /dev/null
@@ -1,128 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-{% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_CONFIG_DAY01_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: Import ssh key for jenkins user
- cmd: |
- mkdir -p /var/lib/jenkins/.ssh && \
- ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && \
- chown jenkins /var/lib/jenkins/.ssh/known_hosts
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Upload maas config
- upload:
- local_path: {{ config.day1_cfg_config.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: {{ config.day1_cfg_config.cluster_maas_config }}
- remote_path: /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Rename maas config
- cmd: mv -v /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/{{ config.day1_cfg_config.cluster_maas_config }} /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/maas.yml
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-- description: Save machines macs
- cmd: |
- echo -n '{{ config.day1_cfg_config.maas_machines_macs | tojson }}' | \
- python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)' > /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/maas-machines.yml
- node_name: {{ HOSTNAME_CFG01 }}
- skip_fail: False
-
-{#
-{{ SHARED.MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='"fluentd"') }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-#}
-
-{{ SHARED.MACRO_CONFIG_DAY01_SALT_MINION() }}
-
-- description: Fix config for Jenkins
- cmd: |
- export SALT_MASTER_MINION_ID={{ HOSTNAME_CFG01 }}
- find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Setup pipeline libraries
- cmd: |
- export PIPELINE_REPO_URL=https://github.com/Mirantis
- git clone --mirror $PIPELINE_REPO_URL/mk-pipelines.git /home/repo/mk/mk-pipelines/
- git clone --mirror $PIPELINE_REPO_URL/pipeline-library.git /home/repo/mcp-ci/pipeline-library/
- chown -R git:www-data /home/repo/mk/mk-pipelines/*
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Refresh pillars before generating nodes
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure network, linux, openssh and salt on cfg01 node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls linux.network,linux,openssh,salt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-#- description: Restart MaaS services before run state (need to prevent maas stucking)
-# cmd: systemctl restart maas-regiond && systemctl restart maas-rackd
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Configure MaaS(cluster) on cfg01 node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls maas.cluster
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure MaaS(region) on cfg01 node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls maas.region
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Configure reclass on cfg01 node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls reclass
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure jenkins on cfg01 node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls jenkins.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml
deleted file mode 100644
index 56394da..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml
+++ /dev/null
@@ -1,147 +0,0 @@
----
-classes:
-- system.linux.system.repo.mcp.apt_mirantis.maas
-- system.maas.region.single
-- service.jenkins.client
-- system.jenkins.client.credential.salt
-- system.jenkins.client.job.deploy.openstack
-- cluster.virtual-mcp-pike-dvr.infra
-parameters:
- _param:
- maas_admin_username: mirantis
- maas_admin_password: r00tme
- maas_db_password: fRqC7NJrBR0x
- dns_server01: 8.8.8.8
- maas_region_port: 5240
- maas_cluster_region_port: ${_param:maas_region_port}
- infra_config_deploy_address: ${_param:reclass_config_master}
- jenkins_git_url: 'git@cfg01:/home/repo'
- jenkins_gerrit_url: ${_param:jenkins_git_url}
- salt_api_password: hovno12345!
- jenkins_git_url: 'git@cfg01:/home/repo'
- jenkins_gerrit_url: ${_param:jenkins_git_url}
- jenkins_salt_api_url: "http://${_param:reclass_config_master}:6969"
- jenkins_pipeline_library_url: git@cfg01:/home/repo/mcp-ci/pipeline-library
- jenkins_pipelines_branch: master
- jenkins:
- client:
- lib:
- pipeline-library:
- url: ${_param:jenkins_pipeline_library_url}
- branch: ${_param:jenkins_pipelines_branch}
- master:
- host: ${_param:reclass_config_master}
- port: 8081
- password: r00tme
- maas:
- region:
- commissioning_scripts:
- 00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
- bind:
- host: ${_param:reclass_config_master}:${_param:maas_region_port}
- port: 5240
- maas_config:
- main_archive: http://mirror.mirantis.com/${_param:apt_mk_version}/ubuntu/
- disk_erase_with_secure_erase: false
- machines:
- ctl01: # cz7341-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "0c:c4:7a:33:1f:e4"
- # ip: ${_param:infra_kvm_node01_deploy_address}
- # subnet: "deploy_network" # create it manually... in UI
- # gateway: ${_param:deploy_network_gateway}
- power_parameters:
- # power_address: "185.8.59.161"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- ctl02: # #cz7342-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "0c:c4:7a:33:20:fc"
- # # ip: ${_param:infra_kvm_node02_deploy_address}
- power_parameters:
- # power_address: "185.8.59.162"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- ctl03: # #cz7343-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "0c:c4:7a:31:fb:b6"
- # ip: ${_param:infra_kvm_node03_deploy_address}
- power_parameters:
- # power_address: "185.8.59.163"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- gtw01: # #cz7055-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "00:25:90:e3:3b:26"
- # ip: ${_param:infra_kvm_node06_deploy_address}
- power_parameters:
- # power_address: "176.74.222.106"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- cmp01: # cz7054-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "00:25:90:e3:37:2e"
- power_parameters:
- # power_address: "176.74.222.104"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- cmp02: #cz7056-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "00:25:90:e3:3f:2a"
- power_parameters:
- # power_address: "176.74.222.108"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- dns01: #cz7056-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "00:25:90:e3:3f:2a"
- power_parameters:
- # power_address: "176.74.222.108"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- dns02: #cz7056-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "00:25:90:e3:3f:2a"
- power_parameters:
- # power_address: "176.74.222.108"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
- prx01: #cz7056-kvm.host-telecom.com
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- interface:
- # mac: "00:25:90:e3:3f:2a"
- power_parameters:
- # power_address: "176.74.222.108"
- power_password: "r00tme"
- power_type: ipmi
- power_user: admin
-
- cluster:
- region:
- host: ${_param:reclass_config_master}:${_param:maas_cluster_region_port}
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
deleted file mode 100644
index ccc1a07..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
deleted file mode 100644
index c3ab09e..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
+++ /dev/null
@@ -1,281 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# isntall designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
deleted file mode 100644
index d7461ed..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 63fb199..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sudo ifdown ens3
- - rm /etc/network/interfaces
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3 || true
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.176.6" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet static
- address {address}
- netmask {netmask}
- gateway {gateway}
- dns-nameservers 172.18.176.6
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml
deleted file mode 100644
index aa5cbb5..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml
+++ /dev/null
@@ -1,512 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: true
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: false
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: true
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- # - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- # source_image: !os_env MCP_IMAGE_PATH1604
- # format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- bmc_port: 41623
- bmc_network: admin
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- bmc_port: 41624
- bmc_network: admin
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- bmc_port: 41625
- bmc_network: admin
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- bmc_port: 41626
- bmc_network: admin
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- bmc_port: 41630
- bmc_network: admin
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: cloudimage1604
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- bmc_port: 41631
- bmc_network: admin
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: cloudimage1604
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- bmc_port: 41632
- bmc_network: admin
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: cloudimage1604
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- bmc_port: 41633
- bmc_network: admin
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: mcp_ubuntu_1604_image
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- bmc_port: 41634
- bmc_network: admin
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- # cloud_init_volume_name: iso
- # cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- # backing_store: mcp_ubuntu_1604_image
- format: qcow2
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
- # capacity: 1
- # format: raw
- # device: cdrom
- # bus: ide
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- bmc_port: 41635
- bmc_network: admin
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
deleted file mode 100644
index cf4a90a..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml
deleted file mode 100644
index 1579920..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/openstack.yaml
+++ /dev/null
@@ -1,163 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# Install OpenStack dashboard and proxy services
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set geteway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
deleted file mode 100644
index f6cc4c7..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_VSWITCH with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'virtual-mcp-pike-ovs-l2gw-bgpvpn/vswitch-config.yaml' as VSWITCH with context %}
-
-{% set VSWITCH_IP = SHARED.IPV4_NET_CONTROL_PREFIX+'.178' %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VSWITCH) }}
-
-{{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VSWITCH, VSWITCH_IP) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "auditd" "gnocchi" "manila" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-{{ VSWITCH.MACRO_CHECK_BGPVPN_ENABLED_BY_DEFAULT() }}
-
-{{ VSWITCH.MACRO_ENABLE_L2GW(SHARED.CLUSTER_NAME, VSWITCH_IP) }}
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml
deleted file mode 100644
index 578c2ad..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml
+++ /dev/null
@@ -1,556 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-{% set HOSTNAME_VSWITCH = 'vswitch.' + DOMAIN_NAME %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
-
- default_{{ HOSTNAME_VSWITCH }}: +178
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
-
- default_{{ HOSTNAME_VSWITCH }}: +178
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: true
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: true
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_VSWITCH }}
- role: vm
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/vswitch-config.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/vswitch-config.yaml
deleted file mode 100644
index 705e9be..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/vswitch-config.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-
-{%- macro MACRO_CONFIGURE_VSWITCH(NODE_NAME, IP) %}
-{#################################################}
-
-- description: 'Install openvswitch-vtep package and configure it'
- cmd: |
- ip addr add {{ IP }}/24 dev ens4
- ifconfig ens4 up
-
- apt-get update
- apt-get -y install openvswitch-switch
- service openvswitch-switch stop
- apt-get -y install openvswitch-vtep bridge-utils
-
- ovsdb-tool create /etc/openvswitch/vtep.db /usr/share/openvswitch/vtep.ovsschema
- ovsdb-tool create /etc/openvswitch/vswitch.db /usr/share/openvswitch/vswitch.ovsschema
- ovsdb-server --pidfile --detach --log-file --remote ptcp:6632:{{ IP }} --remote punix:/var/run/openvswitch/db.sock --remote=db:hardware_vtep,Global,managers /etc/openvswitch/vswitch.db /etc/openvswitch/vtep.db
- ovs-vswitchd --log-file --detach --pidfile unix:/var/run/openvswitch/db.sock
- ovs-vsctl add-br v-switch
- vtep-ctl add-ps v-switch
- vtep-ctl set Physical_Switch v-switch tunnel_ips={{ IP }}
- ovs-vsctl add-port v-switch port0 -- set interface port0 type=internal
- vtep-ctl add-port v-switch port0
- /usr/share/openvswitch/scripts/ovs-vtep --log-file=/var/log/openvswitch/ovs-vtep.log --pidfile=/var/run/openvswitch/ovs-vtep.pid --detach v-switch
- node_name: {{ NODE_NAME }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{%- endmacro %}
-
-{%- macro MACRO_CHECK_BGPVPN_ENABLED_BY_DEFAULT() %}
-{#####################################################}
-
-- description: 'Check BGPVPN extension is enabled by default'
- cmd: salt 'cmp*' pillar.get neutron:compute:bgp_vpn:enabled | grep True
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{%- endmacro %}
-
-{%- macro MACRO_ENABLE_L2GW(CLUSTER_NAME, VSWITCH_IP) %}
-{#####################################################}
-
-- description: 'Check L2GW is disabled by default'
- cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep False
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Enable L2GW'
- cmd: |
- set -e;
- set -x;
- {%- set CLUSTER_PATH = '/srv/salt/reclass/classes/cluster/' + CLUSTER_NAME %}
-
- echo "Setting 'enable: true' for L2gw feature to gateway.yml file"
- L2GW_LINE=$(sed -n '/l2gw/=' {{ CLUSTER_PATH }}/openstack/gateway.yml)
- L2GW_ENABLE_LINE=$((L2GW_LINE + 1))
- sed -i "${L2GW_ENABLE_LINE}s/enabled: false/enabled: true/1" {{ CLUSTER_PATH }}/openstack/gateway.yml
-
- echo "Setting 'ovsdb_hosts' ips for L2gw feature to gateway.yml file"
- sed -i "s/ovsdbx: 127.0.0.1:6632/ovsdbx: {{VSWITCH_IP}}:6632/1" {{ CLUSTER_PATH }}/openstack/gateway.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Refresh pillar data after L2GW enablement'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Check L2GW is enabled'
- cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep True
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{%- endmacro %}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
deleted file mode 100644
index 0a43183..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-- description: remove apparmor
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
deleted file mode 100644
index eb03f23..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
+++ /dev/null
@@ -1,135 +0,0 @@
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-# Install OpenStack control services
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-# isntall designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install Telemetry services (mdb nodes)
-- description: Install redis service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:cluster:role:master' state.sls redis &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:server' state.sls redis
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install gnocchi server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server and *01*' state.sls gnocchi.server &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server' state.sls gnocchi.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup gnocchi client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client and *01*' state.sls gnocchi.client &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client' state.sls gnocchi.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install panko server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceilometer server on first node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server and *01*' state.sls ceilometer
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceilometer server on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server' state.sls ceilometer
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install aodh server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server and *01*' state.sls aodh &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server' state.sls aodh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
deleted file mode 100644
index 2a93e5f..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-# Install OpenStack control services
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
-
-- description: Run 'openssh' formula on cfg01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@salt:master' state.sls openssh &&
- salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
- yes/' /etc/ssh/sshd_config && service ssh reload"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Configure openssh on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
- salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
- yes/' /etc/ssh/sshd_config && service ssh reload"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-#- description: Upload cirros image on ctl01
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 30}
-# skip_fail: false
-#
-#- description: Create net04_external
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create subnet_external
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create net04
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron net-create net04'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create subnet_net04
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create router
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron router-create net04_router01'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Set geteway
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Add interface
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
deleted file mode 100755
index bb1316a..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-. /home/jenkins/fuel-devops30/bin/activate
-pip install -r ./tcp_tests/requirements.txt -U
-pip install psycopg2
-
-export ENV_NAME=virtual-offline-e-dpdk
-export VENV_PATH=/home/jenkins/fuel-devops30
-export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64.qcow2
-export SHUTDOWN_ENV_ON_TEARDOWN=false
-export PYTHONIOENCODING=UTF-8
-export LAB_CONFIG_NAME=virtual-offline-pike-ovs-dpdk
-export CLUSTER_NAME=virtual-offline-pike-ovs-dpdk
-export REPOSITORY_SUITE=2018.3.0
-export SALT_VERSION=2017.7
-
-export TEST_GROUP=test_mcp_pike_ovs_install
-export RUN_TEMPEST=true
-
-# Offline deploy parameters
-#export SALT_MODELS_REF_CHANGE=refs/changes/44/15144/1
-
-export BOOTSTRAP_TIMEOUT=1200
-
-export HOST_APT=10.170.0.226
-export HOST_SALTSTACK=10.170.0.226
-export HOST_ARCHIVE_UBUNTU=10.170.0.226
-export HOST_MIRROR_MCP_MIRANTIS=10.170.0.226
-export HOST_MIRROR_FUEL_INFRA=10.170.0.226
-export HOST_PPA_LAUNCHPAD=10.170.0.226
-export DISTRIB_CODENAME=xenial
-
-export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
-export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
-#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
-
-cd tcp_tests
-py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
deleted file mode 100644
index 21dc19d..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ /dev/null
@@ -1,155 +0,0 @@
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_APT01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID03 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_PRX01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_DNS01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_DNS02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_MDB01 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_MDB02 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_MDB03 with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_VS with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
-{% import 'virtual-offline-pike-ovs-dpdk/vswitch-config.yaml' as VSWITCH with context %}
-{% set VSWITCH_IP = SHARED.IPV4_NET_CONTROL_PREFIX+'.178' %}
-
-- description: Check nginx APT node is ready
- cmd: systemctl status nginx;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check dnsmasq on APT node is ready
- cmd: systemctl status dnsmasq;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VS) }}
-{{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VS, VSWITCH_IP) }}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_DNS01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_DNS02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MDB01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MDB02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MDB03) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: 'Workaround of local_repo_url - set to offline image repository structure'
- cmd: |
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/aptly_server_address: .*/aptly_server_address: {{ os_env('HOST_APT') }}/g' {} +
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila" "jenkins" "glusterfs" "neutron"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-{{ VSWITCH.MACRO_CHECK_BGPVPN_ENABLED_BY_DEFAULT() }}
-
-{{ VSWITCH.MACRO_ENABLE_L2GW(SHARED.CLUSTER_NAME, VSWITCH_IP) }}
-
-- description: Enable hugepages on cmp nodes
- cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#- description: Install watchdog
-# cmd: salt -C "I@watchdog:server" state.sls watchdog;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: WR for correct acces to git repo from jenkins on cfg01 node
- cmd: |
- export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mk/mk-pipelines /home/repo/mk/mk-pipelines/;
- export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mcp-ci/pipeline-library /home/repo/mcp-ci/pipeline-library/;
- chown -R git:www-data /home/repo/mk/mk-pipelines/*;
- chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* to remove apt key until migrate on CC'
- cmd: salt-key -d apt01.virtual-offline-pike-ovs-dpdk -y
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
-- description: '*Workaround* stop minion on apt like proxy node'
- cmd: systemctl stop salt-minion.service
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
-#- description: Workaround to avoid reboot cmp nodes bring OVS interfaces UP
-# cmd: |
-# salt 'cmp*' cmd.run "ifup br-mesh";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Temporary WR
- cmd: |
- ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Enable Jenkins
- cmd: |
- systemctl enable jenkins || true;
- systemctl restart jenkins || true;
- sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: run jenkins.client
- cmd: |
- salt-call state.sls jenkins.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 60}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
deleted file mode 100644
index fe2c8f3..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
- - sudo ifup ens6
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- - export TERM=linux
- - export LANG=C
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## Cloud repo01 node ##################
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree ntp;
- - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key -out /root/ssl-nginx.crt;
- - cd /tmp;
- - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
- - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
- - git clone https://github.com/TatyankaLeontovich/underpillar;
- - mkdir -p /srv/pillar/;
- - mkdir -p /srv/salt;
- - cd /srv/salt;
- - ln -s /tmp/salt-formula-nginx/nginx;
- - ln -s /tmp/salt-dnsmasq/dnsmasq;
- - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
- - cp /tmp/underpillar/states/*.sls /srv/salt/;
- - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
- - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
- - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- - salt-call --local --state-output=mixed state.sls dnsmasq;
- - salt-call --local --state-output=mixed state.sls nginx;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
- auto ens6
- iface ens6 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 800a0b1..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
-
- #- sudo route add default gw {gateway} {interface_name}
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml
deleted file mode 100644
index c5fc670..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
deleted file mode 100644
index b9da22a..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
+++ /dev/null
@@ -1,758 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-offline-pike-ovs-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-offline-pike-ovs-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
-
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-offline-pike-ovs-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_VS = 'vs.' + DOMAIN_NAME %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-offline-pike-ovs-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CID }}: +80
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_VS }}: +178
- ip_ranges:
- dhcp: [+60, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +122
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CID }}: +80
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_VS }}: +178
- ip_ranges:
- dhcp: [+60, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- ip_ranges:
- dhcp: [+10, -10]
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- ip_ranges:
- dhcp: [+90, -10]
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
- - name: apt_cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env APT_IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_APT01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: apt_cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_apt01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 8
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_VS }}
- role: vm
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2018
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
deleted file mode 100644
index df9fd73..0000000
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set OPENSTACK_PIKE_REPOSITORY = os_env('OPENSTACK_PIKE_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE + "/openstack-pike/xenial/ xenial main") %}
-{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
-{% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
-
-
-{%- macro MACRO_CONFIGURE_VSWITCH(NODE_NAME, IP) %}
-{#################################################}
-- description: 'Enable openstack repo for needed packages '
- cmd: |
- apt-key adv --keyserver "{{UBUNTU_KEY_SERVER}}" --recv-keys "{{ UBUNTU_KEY_ID}}"
- echo "{{ OPENSTACK_PIKE_REPOSITORY }}" > /etc/apt/sources.list.d/openstack.list
- eatmydata apt-get clean;
- apt-get update;
- sync;
- node_name: {{ NODE_NAME }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Install openvswitch-vtep package and configure it'
- cmd: |
- ip addr add {{ IP }}/24 dev ens4
- ifconfig ens4 up
-
- apt-get update
- apt-get -y install openvswitch-switch --allow-unauthenticated
- service openvswitch-switch stop
- apt-get -y install openvswitch-vtep bridge-utils --allow-unauthenticated
-
- ovsdb-tool create /etc/openvswitch/vtep.db /usr/share/openvswitch/vtep.ovsschema
- ovsdb-tool create /etc/openvswitch/vswitch.db /usr/share/openvswitch/vswitch.ovsschema
- ovsdb-server --pidfile --detach --log-file --remote ptcp:6632:{{ IP }} --remote punix:/var/run/openvswitch/db.sock --remote=db:hardware_vtep,Global,managers /etc/openvswitch/vswitch.db /etc/openvswitch/vtep.db
- ovs-vswitchd --log-file --detach --pidfile unix:/var/run/openvswitch/db.sock
- ovs-vsctl add-br v-switch
- vtep-ctl add-ps v-switch
- vtep-ctl set Physical_Switch v-switch tunnel_ips={{ IP }}
- ovs-vsctl add-port v-switch port0 -- set interface port0 type=internal
- vtep-ctl add-port v-switch port0
- /usr/share/openvswitch/scripts/ovs-vtep --log-file=/var/log/openvswitch/ovs-vtep.log --pidfile=/var/run/openvswitch/ovs-vtep.pid --detach v-switch
- node_name: {{ NODE_NAME }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{%- endmacro %}
-
-{%- macro MACRO_CHECK_BGPVPN_ENABLED_BY_DEFAULT() %}
-{#####################################################}
-
-- description: 'Check BGPVPN extension is enabled by default'
- cmd: salt 'cmp*' pillar.get neutron:compute:bgp_vpn:enabled | grep True
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{%- endmacro %}
-
-{%- macro MACRO_ENABLE_L2GW(CLUSTER_NAME, VSWITCH_IP) %}
-{#####################################################}
-
-- description: 'Check L2GW is disabled by default'
- cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep False
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Enable L2GW'
- cmd: |
- set -e;
- set -x;
- {%- set CLUSTER_PATH = '/srv/salt/reclass/classes/cluster/' + CLUSTER_NAME %}
-
- echo "Setting 'enable: true' for L2gw feature to gateway.yml file"
- L2GW_LINE=$(sed -n '/l2gw/=' {{ CLUSTER_PATH }}/openstack/gateway.yml)
- L2GW_ENABLE_LINE=$((L2GW_LINE + 1))
- sed -i "${L2GW_ENABLE_LINE}s/enabled: false/enabled: true/1" {{ CLUSTER_PATH }}/openstack/gateway.yml
-
- echo "Setting 'ovsdb_hosts' ips for L2gw feature to gateway.yml file"
- sed -i "s/ovsdbx: 127.0.0.1:6632/ovsdbx: {{VSWITCH_IP}}:6632/1" {{ CLUSTER_PATH }}/openstack/gateway.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Refresh pillar data after L2GW enablement'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 15
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Sync all'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: 'Check L2GW is enabled'
- cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep True
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-{%- endmacro %}
diff --git a/tcp_tests/templates/virtual-offline-ssl/core.yaml b/tcp_tests/templates/virtual-offline-ssl/core.yaml
deleted file mode 100644
index c08b0cd..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-- description: remove apparmor
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
deleted file mode 100644
index 02ef0a3..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ /dev/null
@@ -1,248 +0,0 @@
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% from 'virtual-offline-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-ssl') %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
-
-# Install OpenStack control services
-
-- description: Nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-## isntall designate
-#- description: Install bind
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@bind:server' state.sls bind
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-#
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install Telemetry services (mdb nodes)
-- description: Install redis service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:cluster:role:master' state.sls redis &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:server' state.sls redis
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install gnocchi server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server and *01*' state.sls gnocchi.server &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server' state.sls gnocchi.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup gnocchi client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client and *01*' state.sls gnocchi.client &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:client' state.sls gnocchi.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-#- description: Install gnocchi statsd (optional)
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd and *01*' state.sls gnocchi.statsd &&
-# salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd' state.sls gnocchi.statsd
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-- description: Install panko server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install ceilometer server on first node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server and *01*' state.sls ceilometer
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install ceilometer server on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server' state.sls ceilometer
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install aodh server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server and *01*' state.sls aodh &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server' state.sls aodh
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install OpenStack dashboard and proxy services
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install manila-api on first node
- cmd: |
- salt -C 'I@manila:api and *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-api on other nodes
- cmd: |
- salt -C 'I@manila:api and not *01*' state.sls manila.api;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-scheduler
- cmd: |
- salt -C 'I@manila:scheduler' state.sls manila.scheduler;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install manila-share
- cmd: |
- salt -C 'I@manila:share' state.sls manila.share;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check manila-services
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Create manila type
- cmd: |
- salt 'cfg01*' state.sls manila.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Create CIFS and NFS share and check it status
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
- sleep 5;
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install DogTag server service
-- description: Install DogTag server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@dogtag:server and *01*' state.sls dogtag.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install DogTag server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@dogtag:server' state.sls dogtag.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install Barbican server service
-- description: Install Barbican server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@barbican:server and *01*' state.sls barbican.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Barbican server service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@barbican:server' state.sls barbican.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Barbican client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@barbican:client' state.sls barbican.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: True
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-ssl/run_test.sh b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
deleted file mode 100755
index 747f959..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/run_test.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-. /home/jenkins/fuel-devops30/bin/activate
-pip install -r ./tcp_tests/requirements.txt -U
-pip install psycopg2
-
-export ENV_NAME=virtual-offline-ssl
-export VENV_PATH=/home/jenkins/fuel-devops30
-export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64.qcow2
-export SHUTDOWN_ENV_ON_TEARDOWN=false
-export PYTHONIOENCODING=UTF-8
-export LAB_CONFIG_NAME=virtual-offline-ssl
-export CLUSTER_NAME=virtual-offline-ssl
-export REPOSITORY_SUITE=proposed
-export DISTRIB_CODENAME=xenial
-export SALT_VERSION=2017.7
-
-export TEST_GROUP=test_mcp_pike_ovs_install
-export RUN_TEMPEST=true
-
-# Offline deploy parameters
-#export SALT_MODELS_REF_CHANGE=refs/changes/44/15144/1
-
-export BOOTSTRAP_TIMEOUT=1200
-
-export HOST_APT=10.170.0.226
-export HOST_SALTSTACK=10.170.0.226
-export HOST_ARCHIVE_UBUNTU=10.170.0.226
-export HOST_MIRROR_MCP_MIRANTIS=10.170.0.226
-export HOST_MIRROR_FUEL_INFRA=10.170.0.226
-export HOST_PPA_LAUNCHPAD=10.170.0.226
-
-export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
-export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
-#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
-
-cd tcp_tests
-py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-ssl/salt.yaml b/tcp_tests/templates/virtual-offline-ssl/salt.yaml
deleted file mode 100644
index 46ab1e4..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/salt.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_APT01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_PRX01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_MDB01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_MDB02 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_MDB03 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_SHARE01 with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-offline-ssl/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
-
-
-- description: Check nginx APT node is ready
- cmd: systemctl status nginx;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check dnsmasq on APT node is ready
- cmd: systemctl status dnsmasq;
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MDB01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MDB02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_MDB03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_SHARE01) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: 'Workaround of local_repo_url - set to offline image repository structure'
- cmd: |
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
- find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/aptly_server_address: .*/aptly_server_address: {{ os_env('HOST_APT') }}/g' {} +
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* to remove apt key until migrate on CC'
- cmd: salt-key -d apt01.virtual-offline-pike-ovs-dpdk -y
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
-- description: '*Workaround* stop minion on apt like proxy node'
- cmd: systemctl stop salt-minion.service
- node_name: {{ HOSTNAME_APT01 }}
- retry: {count: 1, delay: 1}
- skip_fail: true
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "dogtag" "runtest" "manila" "auditd" "logrotate" "gnocchi" "neutron" ') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-- description: Enable hugepages on cmp nodes
- cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-#- description: Workaround to avoid reboot cmp nodes bring OVS interfaces UP
-# cmd: |
-# salt 'cmp*' cmd.run "ifup br-mesh";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--meta-data.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
deleted file mode 100644
index fe2c8f3..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup ens4
- - sudo ifup ens5
- - sudo ifup ens6
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- - export TERM=linux
- - export LANG=C
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## Cloud repo01 node ##################
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree ntp;
- - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key -out /root/ssl-nginx.crt;
- - cd /tmp;
- - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
- - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
- - git clone https://github.com/TatyankaLeontovich/underpillar;
- - mkdir -p /srv/pillar/;
- - mkdir -p /srv/salt;
- - cd /srv/salt;
- - ln -s /tmp/salt-formula-nginx/nginx;
- - ln -s /tmp/salt-dnsmasq/dnsmasq;
- - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
- - cp /tmp/underpillar/states/*.sls /srv/salt/;
- - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
- - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
- - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
- - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
- - salt-call --local --state-output=mixed state.sls dnsmasq;
- - salt-call --local --state-output=mixed state.sls nginx;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
- auto ens4
- iface ens4 inet dhcp
- auto ens5
- iface ens5 inet dhcp
- auto ens6
- iface ens6 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 800a0b1..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
-
- #- sudo route add default gw {gateway} {interface_name}
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml
deleted file mode 100644
index c5fc670..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- - rm /etc/resolv.conf
- - touch /etc/resolv.conf
- - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
- - export DNS_IP=$LOCAL_IP".122"
- - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
- - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- - echo "Preparing base OS"
- # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
-
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
-
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
deleted file mode 100644
index 84a1629..0000000
--- a/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
+++ /dev/null
@@ -1,623 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-offline-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-offline-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-offline-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-offline-ssl/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-offline-ssl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-offline-ssl' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +122
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_APT01 }}: +122
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MDB01 }}: +45
- default_{{ HOSTNAME_MDB02 }}: +46
- default_{{ HOSTNAME_MDB03 }}: +47
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: !os_env MCP_IMAGE_PATH1604
- format: qcow2
- - name: apt_cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env APT_IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_APT01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: apt_cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_apt01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 12
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- numa:
- - cpus: 0,1,2,3,4,5
- memory: 4096
- - cpus: 6,7,8,9,10,11
- memory: 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_SHARE01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
index 7100c51..830320d 100644
--- a/tcp_tests/tests/system/test_cvp_pipelines.py
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -130,15 +130,20 @@
ntp_skipped_nodes = ''
job_name = 'cvp-sanity'
+ skipped_packages = ("python-setuptools,"
+ "python-pkg-resources,xunitmerge,"
+ "python-gnocchiclient, "
+ "python-ujson,python-octaviaclient")
+
job_parameters = {
- 'TESTS_SETTINGS': (
- "skipped_packages='python-setuptools,"
- "python-pkg-resources,xunitmerge,python-gnocchiclient,"
- "python-ujson,python-octaviaclient'; "
- "skipped_modules='xunitmerge,setuptools'; "
- "skipped_services='docker,"
- "containerd'; drivetrain_version={0};{1}"
- .format(settings.MCP_VERSION, ntp_skipped_nodes)),
+ 'EXTRA_PARAMS': (
+ """
+ envs:
+ - skipped_packages='{0}'
+ - skipped_modules='xunitmerge,setuptools'
+ - skipped_services='docker,containerd'
+ - skipped_nodes='{1}'"""
+ .format(skipped_packages, ntp_skipped_nodes)),
}
show_step(2)
diff --git a/tcp_tests/tests/system/test_failover_k8s.py b/tcp_tests/tests/system/test_failover_k8s.py
index 60ac4a7..850cfd1 100644
--- a/tcp_tests/tests/system/test_failover_k8s.py
+++ b/tcp_tests/tests/system/test_failover_k8s.py
@@ -57,9 +57,9 @@
new_minion_vip =\
core_actions.get_keepalived_vip_minion_id(vip)
except Exception:
- time.sleep(15)
- new_minion_vip = \
- core_actions.get_keepalived_vip_minion_id(vip)
+ time.sleep(15)
+ new_minion_vip = \
+ core_actions.get_keepalived_vip_minion_id(vip)
LOG.info("VIP {0} migrated to {1}".format(vip, new_minion_vip))
assert new_minion_vip != minion_vip
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index d10d250..0c74d20 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -356,9 +356,9 @@
pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
try:
assert len(ip) > 0, 'fail to find jenkins ip'
@@ -367,9 +367,9 @@
tgt='cid*', fun='cmd.run',
args='service keepalived restart')
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
assert len(ip) > 0, 'fail to find jenkins ip {}'.format(addresses)
@@ -457,9 +457,9 @@
pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
try:
assert len(ip) > 0, 'fail to find jenkins ip'
@@ -468,9 +468,9 @@
tgt='cid*', fun='cmd.run',
args='service keepalived restart')
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
assert len(ip) > 0, 'fail to find jenkins ip {}'.format(addresses)
diff --git a/tcp_tests/utils/create_env_jenkins_cicd.py b/tcp_tests/utils/create_env_jenkins_cicd.py
new file mode 100644
index 0000000..ba73d6c
--- /dev/null
+++ b/tcp_tests/utils/create_env_jenkins_cicd.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+sys.path.append(os.getcwd())
+try:
+ from tcp_tests.fixtures import config_fixtures
+ from tcp_tests.managers import underlay_ssh_manager
+ from tcp_tests.managers import saltmanager as salt_manager
+except ImportError:
+ print("ImportError: Run the application from the tcp-qa directory or "
+ "set the PYTHONPATH environment variable to directory which contains"
+ " ./tcp_tests")
+ sys.exit(1)
+
+
+def main():
+ tests_configs = os.environ.get('TESTS_CONFIGS', None)
+ if not tests_configs or not os.path.isfile(tests_configs):
+        print("Please set TESTS_CONFIGS environment variable with "
+ "the path to INI file with lab metadata.")
+ return 1
+ config = config_fixtures.config()
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+ saltmanager = salt_manager.SaltManager(config, underlay)
+ saltmanager.create_env_jenkins_cicd()
+ saltmanager.create_env_k8s()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tox.ini b/tox.ini
index 9014d59..3a36e88 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,7 +20,8 @@
commands = {posargs}
[testenv:pep8]
-deps = flake8
+deps =
+ flake8
usedevelop = False
exclude = .venv,.git,.tox,.chache,.lib,dist,doc,*egg,build,local*
commands =