Merge "Add ctl01 as target Related-Prod:PROD-36446"
diff --git a/checklist.yaml b/checklist.yaml
index 107c75f..078bc8f 100644
--- a/checklist.yaml
+++ b/checklist.yaml
@@ -359,6 +359,13 @@
status: ProdFailed
defects: PROD-35761
+ - title: test_ceph_health
+ errors:
+ - client is using insecure global_id reclaim
+ - mons are allowing insecure global_id reclaim
+ status: ProdFailed
+ defects: PROD-36461
+
- title: test_prometheus_alert_count[CephMonitorDownMinor]
errors:
- "1 of Ceph Monitors are down"
diff --git a/jobs/global.yaml b/jobs/global.yaml
index ea6deba..d0551a8 100644
--- a/jobs/global.yaml
+++ b/jobs/global.yaml
@@ -4,7 +4,13 @@
Do not edit this job through the web ! <br>
Please use jenkins-job-builder in git <br>
git clone ssh://gerrit.mcp.mirantis.com:29418/mcp/tcp-qa
- current-version: 2019.2.15
- previous-version: 2019.2.14
+
+ <h1><font color='red'>
+ <!--- INSERT ALERT HERE vvvvvvv --->
+
+ <!--- INSERT ALERT HERE ^^^^^^^ --->
+ </font></h1>
+ current-version: 2019.2.17
+ previous-version: 2019.2.16
disabled-proposed: false
disabled-2019-2-0: true
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 463bc24..f72b28f 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -148,19 +148,6 @@
deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes, upgrade_saltstack)
// run test stages
test(shared, common, steps, env_manager)
- //run upgrade env to proposed
- if (env.RUN_UPGRADE_AFTER_JOB == "true") {
- network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
- upgrade_job = "mcp-update-${env.TEMPEST_IMAGE_VERSION}-${network_backend}-sl"
- def deploy = build job: "${upgrade_job}",
- parameters: [
- string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.LAB_CONFIG_NAME}"),
- string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
- string(name: 'NODE', value: "openstack_slave_${env.LAB_CONFIG_NAME}")
- ],
- wait: false,
- propagate: false
- }
} catch (e) {
common.printMsg("Job is failed: " + e.message, "purple")
throw e
@@ -223,7 +210,21 @@
} // node
}
}
-
+ //run upgrade env to proposed
+ if (env.RUN_UPGRADE_AFTER_JOB == "true" && currentBuild.result == 'SUCCESS') {
+ network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
+ upgrade_job = "mcp-update-${env.TEMPEST_IMAGE_VERSION}-${network_backend}-sl"
+ def deploy = build job: "${upgrade_job}",
+ parameters: [
+ string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.LAB_CONFIG_NAME}"),
+ string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
+ string(name: 'PASSED_STEPS', value: steps),
+ string(name: 'TEMPEST_TEST_SUITE_NAME', value: env.TEMPEST_TEST_SUITE_NAME),
+ string(name: 'NODE', value: "openstack_slave_${env.LAB_CONFIG_NAME}")
+ ],
+ wait: false,
+ propagate: false
+ }
} // try
} // node
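Note: the upgrade trigger moves out of the try/node block so it only fires once the deployment result is known, and only on success; `wait: false, propagate: false` keeps it fire-and-forget, so the upgrade job's outcome can never fail this build. A hedged sketch of the same fire-and-forget semantics via the Jenkins REST API (URL, job name, parameters, and credentials are placeholders, not from this repo):

    import requests

    def trigger_downstream(jenkins_url, job, params, auth):
        # buildWithParameters only queues the job and returns immediately --
        # the REST analogue of `build job: ..., wait: false, propagate: false`.
        resp = requests.post(
            "{}/job/{}/buildWithParameters".format(jenkins_url, job),
            params=params, auth=auth, timeout=30)
        resp.raise_for_status()

    trigger_downstream("https://jenkins.example.com",
                       "mcp-update-queens-dvr-sl",
                       {"NODE": "openstack_slave_lab"},
                       ("user", "api-token"))
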
diff --git a/jobs/pipelines/run-test-scenarios.groovy b/jobs/pipelines/run-test-scenarios.groovy
index 76d5cb2..f2f3e74 100644
--- a/jobs/pipelines/run-test-scenarios.groovy
+++ b/jobs/pipelines/run-test-scenarios.groovy
@@ -25,6 +25,7 @@
def steps = env.PASSED_STEPS
def make_snapshot_stages = false
env.LAB_CONFIG_NAME = env.LAB_CONFIG_NAME ?: env.ENV_NAME
+env.TEST_PLAN_NAME_PREFIX = '[2019.2.0-update]Upgraded'
timeout(time: 23, unit: 'HOURS') {
node ("${PARENT_NODE_NAME}") {
@@ -52,34 +53,40 @@
}
} // stage("Run tests")
- stage("Archive all xml reports") {
- dir("${env.slave_workdir }") {
- archiveArtifacts artifacts: "**/*.xml,**/*.log"
+ if (currentBuild.result != 'FAILURE') {
+ stage("Archive all xml reports") {
+ dir("${env.slave_workdir }") {
+ archiveArtifacts artifacts: "**/*.xml,**/*.log"
+ }
+ }
+ stage("report results to testrail from jenkins master") {
+ if ("${env.REPORT_TO_TESTRAIL}" != "false") {
+ common.infoMsg("Running on: " + env.PARENT_NODE_NAME, "blue")
+ shared.verbose_sh("""\
+ [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv --python=python3.7 /home/jenkins/venv_testrail_reporter""", true, false, true)
+ shared.run_cmd("""\
+ . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U""")
+ shared.swarm_testrail_report(steps, env.PARENT_NODE_NAME)
+ } else {
+ common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
}
+ } // stage("report results to testrail from jenkins master")
+ stage("Store TestRail reports to job description") {
+ if ("${env.REPORT_TO_TESTRAIL}" != "false") {
+ if (fileExists("description.txt")) {
+ def String description = readFile("description.txt")
+ currentBuild.description += "${description}"
+ }
+ } else {
+ common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
+ println(currentBuild.result)
+ }
+ } // stage("Store TestRail reports to job description")
+ } // report if success
+ else {
+ common.infoMsg("Upgrade status is not successful. Skipping report steps.")
+ println("Current result is " + currentBuild.result)
}
-
- stage("report results to testrail from jenkins master") {
- if ("${env.REPORT_TO_TESTRAIL}" != "false") {
- common.infoMsg("Running on: " + env.PARENT_NODE_NAME, "blue")
- shared.verbose_sh("""\
- [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv --python=python3.7 /home/jenkins/venv_testrail_reporter""", true, false, true)
- shared.run_cmd("""\
- . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U""")
- shared.swarm_testrail_report(steps, env.PARENT_NODE_NAME)
- } else {
- common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
- }
- } // stage("report results to testrail from jenkins master")
- stage("Store TestRail reports to job description") {
- if ("${env.REPORT_TO_TESTRAIL}" != "false") {
- if (fileExists("description.txt")) {
- def String description = readFile("description.txt")
- currentBuild.description += "${description}"
- }
- } else {
- common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
- }
- } // stage("Store TestRail reports to job description")
} // dir
} // node
} // timeout
\ No newline at end of file
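Note: the report stages are gated on `currentBuild.result != 'FAILURE'` rather than `== 'SUCCESS'`; Jenkins leaves `currentBuild.result` null while a build is still healthy, so the negative check keeps reporting alive for in-flight successful runs, while the upgrade trigger above only fires once the result has been explicitly set. A toy illustration of the null-comparison semantics (Python standing in for Groovy):

    # Jenkins sets currentBuild.result lazily: it stays null until a stage
    # fails or the pipeline assigns it.
    result = None                     # value while a healthy build is running
    assert result != 'FAILURE'        # report stages still run
    assert not (result == 'SUCCESS')  # success-gated upgrade trigger stays quiet
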
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index e652b72..69d6ea6 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -284,7 +284,7 @@
[ -d /home/jenkins/venv_testrail_analyzer ] || virtualenv --python=python3 /home/jenkins/venv_testrail_analyzer
""", true, false, true)
shared.run_cmd("""\
- . /home/jenkins/venv_testrail_analyzer/bin/activate; pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter@refs/changes/94/514594/3
+ . /home/jenkins/venv_testrail_analyzer/bin/activate; pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter
""")
shared.update_working_dir()
}
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 1ebee5a..1771b5d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -29,7 +29,7 @@
currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
-timeout(time: 10, unit: 'HOURS') {
+timeout(time: 20, unit: 'HOURS') {
timestamps {
node ("${PARENT_NODE_NAME}") {
if (! fileExists("${PARENT_WORKSPACE}")) {
diff --git a/jobs/project.yaml b/jobs/project.yaml
index 7aae28c..c2e37a7 100644
--- a/jobs/project.yaml
+++ b/jobs/project.yaml
@@ -22,6 +22,7 @@
- heat-bm-cicd-queens-contrail-sl
- bm-e7-cicd-pike-ovs-maas
- bm-e7-cicd-pike-odl-maas
+ - bm-b300-cicd-queens-ovs-maas
- released-bm-pike-ovs
# --- Released envs ------
- deploy-released:
diff --git a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
index 66cd438..be346cd 100644
--- a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
@@ -150,7 +150,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -202,7 +202,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
index 687d47d..d8302c2 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
@@ -150,7 +150,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -207,7 +207,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
index 230379f..5e575af 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
@@ -150,7 +150,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -207,7 +207,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml b/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
new file mode 100644
index 0000000..e64a02c
--- /dev/null
+++ b/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
@@ -0,0 +1,222 @@
+- job-template:
+ project-type: pipeline
+ description: '{job-description}'
+ concurrent: true
+ disabled: false
+ name: bm-b300-cicd-queens-ovs-maas
+ parameters:
+ - string:
+ default: bm-b300-cicd-queens-ovs-maas
+ description: ''
+ name: LAB_CONFIG_NAME
+ trim: 'false'
+ - string:
+ default: core,kvm,cicd
+ description: Comma-separated list of stacks to deploy the drivetrain (salt
+ cluster and cicd nodes)
+ name: DRIVETRAIN_STACK_INSTALL
+ trim: 'false'
+ - string:
+ default: '24000'
+ description: ''
+ name: DRIVETRAIN_STACK_INSTALL_TIMEOUT
+ trim: 'false'
+ - string:
+ default: openstack,ovs,ceph,stacklight
+ description: Comma-separated list of stacks to deploy the target platform
+ (openstack and additional components)
+ name: PLATFORM_STACK_INSTALL
+ trim: 'false'
+ - string:
+ default: '24000'
+ description: ''
+ name: PLATFORM_STACK_INSTALL_TIMEOUT
+ trim: 'false'
+ - string:
+ default: 2019.2.0
+ description: ''
+ name: MCP_VERSION
+ trim: 'false'
+ - string:
+ default: sre-team-infra
+ description: ''
+ name: NODE_NAME
+ trim: 'false'
+ - string:
+ default: /home/jenkins/images/ubuntu-16-04-x64-mcp2019.2.0.qcow2
+ description: ''
+ name: MCP_IMAGE_PATH1604
+ trim: 'false'
+ - string:
+ default: /home/jenkins/images/cfg01-day01.qcow2
+ description: ''
+ name: IMAGE_PATH_CFG01_DAY01
+ trim: 'false'
+ - string:
+ default: cfg01.${{LAB_CONFIG_NAME}}-config-drive.iso
+ description: ISO name that will be generated and downloaded to /home/jenkins/images/
+ name: CFG01_CONFIG_IMAGE_NAME
+ trim: 'false'
+ - string:
+ default: bm-b300-cicd-queens-ovs-maas
+ description: ''
+ name: ENV_NAME
+ trim: 'false'
+ - string:
+ default: ''
+ description: |-
+ Example: refs/changes/89/411189/36
+ (for now - only one reference allowed)
+ name: TCP_QA_REFS
+ trim: 'false'
+ - string:
+ default: refs/heads/release/proposed/2019.2.0
+ description: reference to patchset in pipeline-library
+ name: PIPELINE_LIBRARY_REF
+ trim: 'false'
+ - string:
+ default: refs/heads/release/proposed/2019.2.0
+ description: reference to patchset in mk-pipelines
+ name: MK_PIPELINES_REF
+ trim: 'false'
+ - string:
+ default: release/proposed/2019.2.0
+ description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
+ value
+ name: COOKIECUTTER_TEMPLATE_COMMIT
+ trim: 'false'
+ - string:
+ default: release/proposed/2019.2.0
+ description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
+ value
+ name: SALT_MODELS_SYSTEM_COMMIT
+ trim: 'false'
+ - string:
+ default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
+ description: |-
+ Pytest option -k or -m, with expression to select necessary tests.
+ Additional pytest options are allowed.
+ name: RUN_TEST_OPTS
+ trim: 'false'
+ - bool:
+ default: true
+ description: ''
+ name: SHUTDOWN_ENV_ON_TEARDOWN
+ - string:
+ default: ''
+ description: ''
+ name: COOKIECUTTER_REF_CHANGE
+ trim: 'false'
+ - string:
+ default: ''
+ description: ''
+ name: ENVIRONMENT_TEMPLATE_REF_CHANGE
+ trim: 'false'
+ - string:
+ default: '[MCP1.1_QUEENS]Tempest'
+ description: ''
+ name: TEMPEST_TEST_SUITE_NAME
+ trim: 'false'
+ - string:
+ default: queens
+ description: ''
+ name: TEMPEST_IMAGE_VERSION
+ trim: 'false'
+ - string:
+ default: proposed
+ description: ''
+ name: UPDATE_REPO_CUSTOM_TAG
+ trim: 'false'
+ - bool:
+ default: true
+ description: If set, reports will be created in TestRail for this build
+ name: REPORT_TO_TESTRAIL
+ - choice:
+ choices:
+ - heat
+ - devops
+ description: ''
+ name: ENV_MANAGER
+ - string:
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
+ description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
+ name: OS_AUTH_URL
+ trim: 'false'
+ - string:
+ default: sre-team
+ description: OpenStack project (tenant) name
+ name: OS_PROJECT_NAME
+ trim: 'false'
+ - string:
+ default: default
+ description: OpenStack user domain name
+ name: OS_USER_DOMAIN_NAME
+ trim: 'false'
+ - string:
+ default: sre-qa-ci-eu
+ description: Jenkins credentials ID with username and password to create a
+ heat stack in OpenStack
+ name: OS_CREDENTIALS
+ trim: 'false'
+ - string:
+ default: eu-cloud-low-flavors.env
+ description: |-
+ Heat template environment filename with 'parameter_defaults' dict, placed in tcp_tests/templates/_heat_environments/,
+ for example: microcloud-8116.env
+ name: LAB_PARAM_DEFAULTS
+ trim: 'false'
+ - string:
+ default: release/proposed/2019.2.0
+ description: ''
+ name: JENKINS_PIPELINE_BRANCH
+ trim: 'false'
+ - string:
+ default: refs/heads/release/proposed/2019.2.0
+ description: ''
+ name: MCP_COMMON_SCRIPTS_REFS
+ trim: 'false'
+ - string:
+ default: proposed
+ description: ''
+ name: UPDATE_VERSION
+ trim: 'false'
+ - string:
+ name: IPMI_CREDS
+ default: 'lab_engineer'
+ - string:
+ default: ''
+ description: ''
+ name: TEMPEST_EXTRA_ARGS
+ trim: 'false'
+ - password:
+ name: CISCO_PASS
+ default: '1fac0DlhILBo'
+ - string:
+ default: ''
+ description: ''
+ name: SALT_MODELS_SYSTEM_REF_CHANGE
+ trim: 'false'
+ - string:
+ default: ''
+ description: ''
+ name: BATCH_SIZE
+ trim: 'false'
+ - bool:
+ default: false
+ description: Whether to perform dist-upgrade on virtual nodes during deployment
+ name: DIST_UPGRADE_NODES
+ - bool:
+ default: true
+ description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+ name: UPGRADE_SALTSTACK
+ pipeline-scm:
+ lightweight-checkout: false
+ scm:
+ - git:
+ branches:
+ - FETCH_HEAD
+ refspec: ${{TCP_QA_REFS}}
+ url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa
+ script-path: jobs/pipelines/deploy-cicd-and-run-tests.groovy
+ logrotate:
+ daysToKeep: 365
diff --git a/jobs/templates/bm-cicd-pike-ovs-maas.yml b/jobs/templates/bm-cicd-pike-ovs-maas.yml
index d3dfb38..a3b6e4a 100644
--- a/jobs/templates/bm-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-pike-ovs-maas.yml
@@ -138,7 +138,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/bm-cicd-queens-ovs-maas.yml b/jobs/templates/bm-cicd-queens-ovs-maas.yml
index 2b104f7..73b9196 100644
--- a/jobs/templates/bm-cicd-queens-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-queens-ovs-maas.yml
@@ -138,7 +138,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -206,7 +206,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/bm-e7-cicd-pike-odl-maas.yml b/jobs/templates/bm-e7-cicd-pike-odl-maas.yml
index 1b21f1c..26e463e 100644
--- a/jobs/templates/bm-e7-cicd-pike-odl-maas.yml
+++ b/jobs/templates/bm-e7-cicd-pike-odl-maas.yml
@@ -43,6 +43,11 @@
name: MCP_VERSION
trim: 'false'
- string:
+ default: ''
+ description: ''
+ name: SALT_MODELS_SYSTEM_REF_CHANGE
+ trim: 'false'
+ - string:
default: sre-team-infra
description: ''
name: NODE_NAME
@@ -143,7 +148,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml b/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
index b0feb54..131f7e3 100644
--- a/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
@@ -70,6 +70,11 @@
name: TCP_QA_REFS
trim: 'false'
- string:
+ default: ''
+ description: ''
+ name: SALT_MODELS_SYSTEM_REF_CHANGE
+ trim: 'false'
+ - string:
default: refs/heads/release/2019.2.0
description: reference to patchset in pipeline-library
name: PIPELINE_LIBRARY_REF
@@ -138,7 +143,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/deploy-released.yml b/jobs/templates/deploy-released.yml
index 95f2ff5..126b5e0 100644
--- a/jobs/templates/deploy-released.yml
+++ b/jobs/templates/deploy-released.yml
@@ -33,7 +33,7 @@
name: OS_CREDENTIALS
trim: 'false'
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: ''
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/deploy_bm.yml b/jobs/templates/deploy_bm.yml
index ebdb885..0e3c102 100644
--- a/jobs/templates/deploy_bm.yml
+++ b/jobs/templates/deploy_bm.yml
@@ -34,7 +34,7 @@
name: PARENT_NODE_NAME
trim: 'false'
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: ''
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
index c55e807..576936f 100644
--- a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
@@ -145,7 +145,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
index cae7d08..313f9c6 100644
--- a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
@@ -146,7 +146,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -220,7 +220,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/heat-cicd-pike-dvr-sl.yml b/jobs/templates/heat-cicd-pike-dvr-sl.yml
index 0248d9d..2ade702 100644
--- a/jobs/templates/heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-pike-dvr-sl.yml
@@ -155,7 +155,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/heat-cicd-queens-contrail41-sl.yml b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
index a7e8944..2eca95b 100644
--- a/jobs/templates/heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
@@ -148,7 +148,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/heat-cicd-queens-dvr-sl.yml b/jobs/templates/heat-cicd-queens-dvr-sl.yml
index 3ba3d08..bcaaa84 100644
--- a/jobs/templates/heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-queens-dvr-sl.yml
@@ -149,7 +149,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -206,7 +206,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/released-bm-pike-ovs.yml b/jobs/templates/released-bm-pike-ovs.yml
index 88cbdef..ceb8563 100644
--- a/jobs/templates/released-bm-pike-ovs.yml
+++ b/jobs/templates/released-bm-pike-ovs.yml
@@ -133,7 +133,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
@@ -186,7 +186,7 @@
description: Whether to perform dist-upgrade on virtual nodes during deployment
name: DIST_UPGRADE_NODES
- bool:
- default: false
+ default: true
description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
name: UPGRADE_SALTSTACK
pipeline-scm:
diff --git a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
index f0670f0..0e00a79 100644
--- a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
@@ -153,7 +153,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
index 9273793..da9c52a 100644
--- a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
@@ -153,7 +153,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
index 4ddd929..89f5d39 100644
--- a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
@@ -136,7 +136,7 @@
name: TEMPEST_TARGET
trim: 'false'
- string:
- default: proposed
+ default: ''
description: ''
name: UPDATE_REPO_CUSTOM_TAG
trim: 'false'
@@ -151,7 +151,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
index 01921e0..8d845b6 100644
--- a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
@@ -153,7 +153,7 @@
description: ''
name: ENV_MANAGER
- string:
- default: https://ic-eu.ssl.mirantis.net:5000/v3
+ default: https://keystone.ic-eu.ssl.mirantis.net/v3
description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
name: OS_AUTH_URL
trim: 'false'
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index 67d3880..a214e4a 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -63,12 +63,16 @@
test_scenario:
- backup-saltmaster:
- run-test-opts: '--maxfail=1 -k TestBackupRestoreMaster and not maas'
+ run-test-opts: |-
+ tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_salt_master_manual_backup_restore_pipeline \
+ tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_salt_master_manual_backup_restore
deployment: heat-cicd-queens-dvr-sl
display-name: Backup/Restore SaltMaster
- backup-saltmaster-with-maas:
- run-test-opts: '--maxfail=1 -k TestBackupRestoreMaster and maas'
+ run-test-opts: |-
+ tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_maas_manual_backup_restore_pipeline \
+ tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_maas_backup_restore_manual
deployment: bm-cicd-queens-ovs-maas
display-name: Backup/Restore SaltMaster (with MAAS)
@@ -110,7 +114,11 @@
- ceph_rgw-queens-dvr-sl:
run-test-opts: '--maxfail=1 -k TestCephRgw'
deployment: heat-cicd-queens-dvr-sl
- display-name: Add/Remove MGR node
+ display-name: Add/Remove RGW node
+ - ceph_failover:
+ run-test-opts: '--maxfail=1 -k TestFailoverCeph'
+ deployment: heat-cicd-queens-dvr-sl
+ display-name: Failover tests for Ceph nodes
jobs:
- '{test_scenario}'
logrotate:
@@ -143,34 +151,40 @@
tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
-
+ test-pattern-with-contrail:
+ ^heat_tempest_plugin.tests*|^tempest.api.image*|^tempest_horizon*|^tempest.api.identity*|^tempest.api.network*|^tempest.api.compute*|^tempest.api.volume*|^tempest.scenario*|^tempest.api.object_storage*
test_scenario:
- mcp-update-pike-dvr-sl:
deployment: released-heat-cicd-pike-dvr-sl
- disabled: true
run-test-opts: '{test-opt}'
+ tempest_pattern: 'tempest'
display-name: MCP update (pike)
+ tempest_test_suite: "[MCP1.1_PIKE]Tempest"
- mcp-update-queens-dvr-sl:
deployment: released-heat-cicd-queens-dvr-sl
- disabled: true
+ tempest_pattern: 'tempest'
run-test-opts: '{test-opt}'
display-name: MCP update (queens)
+ tempest_test_suite: "[MCP1.1_QUEENS]Tempest"
- mcp-update-pike-contrail-sl:
deployment: released-heat-cicd-pike-contrail41-sl
- disabled: true
+ tempest_pattern: '{test-pattern-with-contrail}'
run-test-opts: '{test-opt-with-contrail}'
display-name: MCP update (pike + OC)
+ tempest_test_suite: "[MCP1.1_PIKE]Tempest"
- mcp-update-queens-contrail-sl:
deployment: released-heat-cicd-queens-contrail41-sl
- disabled: true
+ tempest_pattern: '{test-pattern-with-contrail}'
run-test-opts: '{test-opt-with-contrail}'
display-name: MCP update (queens + OC)
+ tempest_test_suite: "[MCP1.1_QUEENS]Tempest"
- os-update-pike-to-queens:
deployment: heat-cicd-pike-dvr-sl
+ disabled: true
run-test-opts: '-k TestUpdatePikeToQueens'
display-name: Update Pike -> Queens
@@ -256,6 +270,12 @@
name: PASSED_STEPS
trim: 'false'
- string:
+ default: '{tempest_test_suite|}'
+ description: '[MCP1.1_xxx]Tempest, where xxx is the MCP version;
+ used only for reporting to TestRail'
+ name: TEMPEST_TEST_SUITE_NAME
+ trim: 'false'
+ - string:
default: ''
description: 'Example: refs/changes/89/411189/36
(for now - only one reference allowed)'
@@ -268,6 +288,12 @@
Additional pytest options are allowed.
name: RUN_TEST_OPTS
trim: 'false'
+ - text:
+ default: '{tempest_pattern|}'
+ description: |-
+ Examples: 'set=full', 'set=smoke', or a test name.
+ name: TEMPEST_PATTERN
+ trim: 'false'
- bool:
default: true
name: REPORT_TO_TESTRAIL
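Note: replacing `-k 'TestBackupRestoreMaster and not maas'` with explicit node IDs makes selection deterministic: a `-k` keyword expression matches against test IDs and silently gains or loses tests when names change, while a node ID fails loudly if the test disappears. An illustration of the two selection styles (paths taken from the hunk above):

    import pytest

    # old style: keyword expression, substring-matched against test IDs
    pytest.main(["--maxfail=1", "-k", "TestBackupRestoreMaster and not maas"])

    # new style: one exact node ID per test, no accidental matches
    pytest.main([
        "tcp_tests/tests/system/test_backup_restore.py"
        "::TestBackupRestoreMaster::test_salt_master_manual_backup_restore",
    ])
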
diff --git a/jobs/view.yaml b/jobs/view.yaml
index c922ad9..69718b6 100644
--- a/jobs/view.yaml
+++ b/jobs/view.yaml
@@ -89,13 +89,16 @@
filter-executors: true
filter-queue: true
job-name:
+ - deploy_bm
- bm-cicd-pike-ovs-maas
- bm-cicd-queens-ovs-maas
- - deploy_bm
- heat-bm-cicd-pike-contrail-sl
- heat-bm-cicd-queens-contrail-sl
- released-bm-pike-ovs
- show_networks_used_by_libvirt
+ - bm-e7-cicd-pike-ovs-maas
+ - bm-e7-cicd-pike-odl-maas
+ - bm-b300-cicd-queens-ovs-maas
columns:
- status
- weather
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index e83a94b..dae2ce4 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -289,6 +289,7 @@
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+ string(name: 'SALT_MODELS_SYSTEM_REF_CHANGE', value: env.SALT_MODELS_SYSTEM_REF_CHANGE ?: ''),
string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecutter_template_commit}"),
string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${salt_models_system_commit}"),
string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
@@ -445,6 +446,7 @@
string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
string(name: 'REPOSITORY_SUITE', value: "${env.MCP_VERSION}"),
string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${saltmodels_system_commit}"),
+ string(name: 'SALT_MODELS_SYSTEM_REF_CHANGE', value: env.SALT_MODELS_SYSTEM_REF_CHANGE ?: ''),
string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecuttertemplate_commit}"),
string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
@@ -498,6 +500,7 @@
string(name: 'DEPLOY_NETWORK_NETMASK', value: "${deploy_network_mask}"),
string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+ string(name: 'SALT_MODELS_SYSTEM_REF_CHANGE', value: env.SALT_MODELS_SYSTEM_REF_CHANGE ?: ''),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
string(name: 'UPDATE_VERSION', value: "${update_version}"),
string(name: 'MCP_COMMON_SCRIPTS_REFS', value: "${mcp_common_scripts_ref}"),
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index 7c75086..3952b5f 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import yaml
from tcp_tests import logger
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
@@ -83,35 +84,21 @@
:param file_name: name of YAML file to find a key
:return: str, key if found
"""
- LOG.info("Try to get '{key}' key from '{file}' file".format(
+ LOG.debug("Try to get '{key}' key from '{file}' file".format(
file=file_name,
key=key
))
request_key = self.ssh.check_call(
- "{reclass_tools} get-key {key} /srv/salt/reclass/*/{file_name}".
+ "{reclass_tools} get-key {key} "
+ "/srv/salt/reclass/classes/{file_name}".
format(reclass_tools=self.reclass_tools_cmd,
key=key,
file_name=file_name))['stdout']
- # Reclass-tools returns result to stdout, so we get it as
- # ['\n',
- # '---\n',
- # '# Found parameters._param.jenkins_pipelines_branch in \
- # /srv/salt/reclass/classes/cluster/../infra/init.yml\n',
- # 'release/proposed/2019.2.0\n',
- # '...\n',
- # '\n']
- # So we have no chance to get value without dirty code like `stdout[3]`
-
- LOG.info("Raw output from reclass.get_key {}".format(request_key))
- if len(request_key) < 4:
- print("Can't find {key} in {file_name}. Got stdout {stdout}".
- format(key=key,
- file_name=file_name,
- stdout=request_key))
- return None
- value = request_key[3].strip('\n')
- LOG.info("From reclass.get_key {}".format(value))
+ LOG.debug("Raw output from reclass.get_key {}".format(request_key))
+ encoded_request_key = ''.join(request_key).encode(encoding='UTF-8')
+ value = yaml.safe_load(encoded_request_key)
+ LOG.info("From reclass.get_key {}: {}".format(key, value))
return value
def add_bool_key(self, key, value, short_path):
@@ -174,13 +161,18 @@
May look like cluster/*/cicd/control/leader.yml
:return: None
"""
- self.ssh.check_call(
- "{reclass_tools} del-key classes {value} \
- /srv/salt/reclass/classes/{path}".format(
- reclass_tools=self.reclass_tools_cmd,
+ current_content = self.get_key('classes', short_path) or []
+ if value not in current_content:
+ LOG.info("{value} not found in classes in {path}".format(
value=value,
path=short_path
))
+ return
+
+ new_content = current_content
+ new_content.remove(value)
+
+ self.add_key("classes", new_content, short_path)
def delete_key(self, key, short_path):
"""
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index c2e82d4..105c34d 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -286,6 +286,7 @@
:rtype: list stdout
:raises: devops.error.DevopsCalledProcessError
"""
+ LOG.info("Executing {} on {}".format(cmd, node_name))
remote = self.remote(node_name=node_name, host=host,
address_pool=address_pool)
return remote.check_call(
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 194b275..32df9cc 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,8 +1,7 @@
-# git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre] # Use this requirement for PostgreSQL
mock>=1.2,<4.0.0 # pinning first to avoid dependency meat grinder below
libvirt-python>=3.5.0,<4.1.0 # LGPLv2+
-git+git://github.com/openstack/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
-git+git://github.com/dis-xcom/fuel-devops-driver-ironic
+git+https://github.com/openstack-archive/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+git+https://github.com/dis-xcom/fuel-devops-driver-ironic
paramiko
six
requests>=2.2.0
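Note: the `git://` URLs are replaced because GitHub retired the unauthenticated git protocol (and fuel-devops now lives under openstack-archive), so pip has to fetch these VCS pins over HTTPS. A quick way to verify a pinned requirement still resolves (a one-off check, not part of the repo):

    import subprocess

    subprocess.check_call([
        "pip", "install", "--no-deps",
        "git+https://github.com/openstack-archive/fuel-devops.git"
        "@10f4ac744e89bfefcba3d7d009de82669c52fa6e",
    ])
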
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env b/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
index 0d0c859..abbe367 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
@@ -21,12 +21,12 @@
cid_flavor: 8cpu16ram50stor0epf
ntw_flavor: 8cpu16ram50stor0epf
nal_flavor: 8cpu16ram50stor0epf
- dbs_flavor: 8cpu16ram50stor0epf
- msg_flavor: 8cpu16ram50stor0epf
- mon_flavor: 8cpu16ram50stor0epf
+ dbs_flavor: 4cpu8ram40stor0epf
+ msg_flavor: 4cpu4ram50stor0epf
+ mon_flavor: 4cpu4ram50stor0epf
mdb_flavor: 8cpu16ram50stor0epf
log_flavor: 8cpu16ram50stor0epf
- mtr_flavor: 8cpu16ram50stor0epf
+ mtr_flavor: 4cpu4ram50stor0epf
cmp_flavor: 4cpu8ram100stor10epf
cmn_flavor: 4cpu8ram40stor0epf
rgw_flavor: 4cpu8ram40stor0epf
@@ -40,7 +40,7 @@
vsrx_flavor: oc_vsrx
key_pair: system_key_8133
- bm_availability_zone: nova
+ bm_availability_zone: vlan-provider-net-az
vm_availability_zone: nova
net_public: public
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
index 7b77db1..d5e0e5a 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -44,7 +44,7 @@
vsrx_flavor: oc_vsrx
key_pair: system_key_8133
- bm_availability_zone: nova
+ bm_availability_zone: vlan-provider-net-az
vm_availability_zone: nova
net_public: public
diff --git a/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env b/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env
index ad4290e..1af6209 100644
--- a/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env
+++ b/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env
@@ -21,12 +21,12 @@
cid_flavor: 8cpu16ram50stor0epf
ntw_flavor: 8cpu16ram50stor0epf
nal_flavor: 8cpu16ram50stor0epf
- dbs_flavor: 8cpu16ram50stor0epf
- msg_flavor: 8cpu16ram50stor0epf
- mon_flavor: 8cpu16ram50stor0epf
+ dbs_flavor: 4cpu8ram40stor0epf
+ msg_flavor: 4cpu4ram50stor0epf
+ mon_flavor: 4cpu4ram50stor0epf
mdb_flavor: 8cpu16ram50stor0epf
log_flavor: 8cpu16ram50stor0epf
- mtr_flavor: 8cpu16ram50stor0epf
+ mtr_flavor: 4cpu4ram50stor0epf
cmp_flavor: 4cpu8ram100stor10epf
cmn_flavor: 4cpu8ram40stor0epf
rgw_flavor: 4cpu8ram40stor0epf
diff --git a/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh b/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
index 1e0d43e..70cd37f 100644
--- a/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
+++ b/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
@@ -35,7 +35,7 @@
. ${TESTMARKER_VENV_PATH}/bin/activate
#pip install git+https://github.com/ibumarskov/testrail-reporter -U
# Pull from review to test changes in testrail-reporter before their merging
-pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter@refs/changes/94/514594/3
+pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter@refs/changes/94/514594/8
deactivate
if [ "$CHANGE_RIGHTS" = true ]; then
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/encryption-key.asc b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/encryption-key.asc
new file mode 100644
index 0000000..381eb77
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/encryption-key.asc
@@ -0,0 +1,56 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQcYBFyBRcUBEACmP/muUIwbEg6Z7dA3c9I2NadcjDHXKg/ViXwaOB4KSd9/FC0o
+KSBPccWb+1sm+zdUy2f/LC5r8RvU7yZd4Mbzz8R1DQncXg4nG7bppW7oAcHpc0jk
+pV/SvdMYxuXsrbKbpoGEquwVkbb4oTv2MLSBfGfFzBeQfiwGEWm1xPLSeXc4biLC
+FatCU7w4LS1U4BEOqRCp6lW/hQFLoX+j6rNT8TwC5AeFpKgUWuQZGOO4fZKpbvo2
+sCvF5VA1HSVXlZtzum6pL1yzLL/SoyLrYOy1KrZQmSBHG9htCZQVmvYK7U5WtWE4
+Ws5IAj+HwvgKyzXE2Srsirj1NqauQRsk+1riQk3rpDrX2BeXNUSoHR5M/RDY0gCc
+8P6heanQRnyFtjUSoovkQsydY77+QVxe0MCs+lZlg31fL+wJVG7FIbIKKwR5sj8i
+/JqhWE+t2ZzIrQ/7o7fRk7hv/u69Vb/t/Nt7fkbn53zoubqi3kNgXf6hwhTUtfW/
+lE9cc4JTzis4i/RnILUDnAwos1c0Z+tGCUo4pbiP71VfU8L259g+clPFXOIkqA9t
+L9JSZQfhH/lRj3Abs57OvZjN7/D1h8PWB+8nTB8bkoUt45SubgQb0Y9maWUcwjxw
+AcJSIk6mq8vVdBu7zOuslDjMnoUZbtJwcSwQQOnb9UUppjs3CjbcH80ttQARAQAB
+AA/9ExdprtDlJf6u2pJqxNNyInOK4p/e4VydMOJ28/PZz0iod8lzXhdK9JSWItF8
+qD9VHVG2gaErO44Wqh9EgqdbcYg8gUycA0hxy5/tI2uyDsaU5CAvEMLE/Eh8Q24j
+3UgdKK64VOnj7p4rKuYpIp55PB1zNU24rwkuOQwq3Yreb7kvLbXIHA2s+xLunGzj
+tcl9a/eSSFD2w+WcPnkvVT2QlmUvhQ12p6w++QdvBkrLa9ZPz1FFPp6AiFtLGK5e
+KW6uyV1xc9BSjujmpmPBkNIynKNpCFxtTn0uH2doMAr5kkuqIV726SfUZISNkyOa
+pHKtnAtsWHmdv9skzQIBAgAzcXTBGbdDxRj6QR+ohqbsCzfu3z9QHSbXUmxezti9
+bQqpsU1SIg8z2oDARFR6KlRzhnfpPvan+Gp9TvYsvxrXe61HpxRMdLj6Gt2Ibruf
+YHCtr1S9J5CzTTOurlIKpACUYIqgVXfgIkQzqiYX8e56PiDTUB++OqEg66i0orXB
+nbHAD2vu16CNvcaNqsak3DWkHMwmEfsuxqyUXNte0eYu9SCHtnNoYT/D7A72gK4b
+Gqg80J8ZCpo1ilIX3xUq8WsH+CoXs0X7hy6Cbi22AqnHFRYmrgoIWmRzJonp393b
+yqmTV+QsKQRpmwdX4hiH78zJLnLEUQMn8CuHAGwaJCzk4okIAMKNrIQZhkdbCCe4
+IrLuMKn4aQj3c22SMXNmu78/0cP9Rtsm3ChjzzelLO7NjvPm0nIvEcThFSIZIXCv
+iWGZCXFCKn3WtA5xWuMFNXsEQcc3AG/qRODdDSeFpo+VH/9IwppAc3zI2jxe1PRD
+G2DnheLaLIKgHunsCYxpftJDod/vRqRHeU7ulMVJfEKVxdzrCbKGiIOXSyS6KowQ
+JOxF/80ocq/25Zc/oH25Y2r/0y+xzDpOHBgU0ndrCZf2z8oOuECJTxcq83UDyJzT
+HrG/hTrU83YsQMZ0AwBrYxpzUfdH7b6y60VE19FrwmMDK6Fz8I/x4Ai0sNkI3QLR
+NntY9fsIANrB3QM8CtsdxXsFvdTEwNLsG8LMdn3loCH6Cq3ejkEKa69Uua+sB6ND
+wYOXWzyksLZJyfxIXux/hMlK/kO3ohGcEFiMUaDZndJy8IKUlDrhwcUZqm7dXMDU
+CIf0T3rOEzOXbNu3UTds3j/ruSvA5KmjzOa4Qnb41CyL5Fh7x0R8Rux3NzAn6Ecx
+Y+nAWRtI/Yz7zdL8zuHaJfbVuxAPJ+ImcXAS7cX6T9dM3tWRlam1+0Ezhdb4F8i5
+lcY7sMu95scDwhV7qOmln6wtGSkBPZgE0+TqRuELZrPvlcIRRIM42UwPWhYO2PG8
+kKd2i5teweDnhzN8+E87VV2BQhP9DA8H/0+ZiXsvaG60JGqNmWzVbB6U1qgwrFOR
+VcuzIWpdZyQR8Ok63GXuA0odoqReolba9R6fVlXchj6INBz2WY2F0twwCRPx7tRg
+Pyq4PaTA8ZYYjAVWVCd9k97gY2i80p4MPzQCnE8g4n6OWGY47pcTwSkm4HBoGoam
+igIRn3Soz7CXGF+PvSGi1T0jpwM5IWfM3IwEUPdPTIJuA2iD/9zSKDvhsP+trJ1Y
+TMe9CW3Llf5mFbHLRZ7LfMOLIngKOIxBAxHiT8wUrIRaH78wHdz8ALDsC+LNP6rK
+hKb8h/VHXaqmf0BlNjGpO7XZXfxXWJ0oTUG5Z+jKz2Ir14HYLZI1GlOA8bQlZXhh
+bXBsZS5jb20gPHNhbHQtbWFzdGVyQGV4YW1wbGUuY29tPokCTgQTAQgAOBYhBLaR
+Vrvqyq56MiGjUvXLKtw2FZsDBQJcgUXFAhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4B
+AheAAAoJEPXLKtw2FZsDpi4P/1kmvlpkbOhrL73zAPyMzYa4Yo2Pi/BoMbyEKNKO
+K3wLCdP6xLGecVIt8pANosksDSGlWAnWj36/jfgt/aZisx1u6MTYaOEHkXahxOX4
+ghDW1cTbdtz7Uy5Ah9O3WNI+ejmOpCtuc3P/XOkdttKZLuCNCs6ocgCsejpNHcFK
+vMhOhnRKV8kcBrG2QLyfSyafBtM/zV+NR4Wrng71Za8fiXHlDanmrAIyuSnD538r
+hTwSFe0C9HntwuF6W+UShN7c+jPJaKQjKbZy9fuFp33NcTSPCB5dH9yrhQvOeFQo
+dFzEabMDFVGPfUVWR+TH39dWYOsq5zFmgQAbOB/vHdmEtrYNrxX0AiCZZHQHTUb9
+oBK68V8eVeFdoRLcMORBZ2RCqkQTOQoAF7o772knltjtsymnI0XNvVC/XCnZv89Q
+/eoivrd/rMMpTFOGcys6EAnSUWx0ZG/JCkezQqnx9U219BvqKNOZ60aOeOYHKpsX
+Ha8Nr72YRmtm0UMsDjEUyLOj+o06XnN7uafMv2bZpjWh2hfOrkAbxe41z6t+78ho
+P+C5vSvp01OmAt71iq+62MXVcLVKEWDpiuZSj8m83RlY5AGIaPaGX9LKPcHdGxKw
+QSczgB/jI3G08vWaq82he6UJuYexbYe1iJXfvcx8kThwZ1nXQJm+7UsISUsh8/NZ
+x0n/
+=uxDD
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
new file mode 100644
index 0000000..0ddb228
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -0,0 +1,876 @@
+default_context:
+ ironic_enabled: True
+ openstack_baremetal_hostname: bmt
+ openstack_baremetal_address_baremetal: 10.14.0.10
+ openstack_baremetal_node01_baremetal_address: 10.14.0.11
+ openstack_baremetal_node02_baremetal_address: 10.14.0.12
+ openstack_baremetal_node03_baremetal_address: 10.14.0.13
+ openstack_baremetal_node01_address: 10.167.11.21
+ openstack_baremetal_node02_address: 10.167.11.22
+ openstack_baremetal_node03_address: 10.167.11.23
+ openstack_baremetal_neutron_subnet_cidr: 10.14.0.0/24
+ openstack_baremetal_neutron_subnet_allocation_start: 10.14.0.100
+ openstack_baremetal_neutron_subnet_allocation_end: 10.14.0.200
+ openstack_baremetal_address: 10.167.11.20
+ openstack_baremetal_interface: ens7
+ openstack_baremetal_vip_interface: br_baremetal
+ jenkins_cfg_admin_password: r00tme
+ bmk_enabled: 'False'
+ cicd_control_node01_address: 10.167.11.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.11.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.11.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.11.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cluster_domain: bm-b300-cicd-queens-ovs-maas.local
+ cluster_name: bm-b300-cicd-queens-ovs-maas
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+ control_network_netmask: 255.255.254.0
+ control_network_subnet: 10.167.11.0/23
+ control_vlan: '2404'
+
+ jenkins_pipelines_branch: 'release/2019.2.0'
+ deploy_network_gateway: 172.16.180.1
+ deploy_network_netmask: 255.255.254.0
+ deploy_network_subnet: 172.16.180.0/23
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.224.6
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.11.241
+ infra_kvm01_deploy_address: 172.16.180.3
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.11.242
+ infra_kvm02_deploy_address: 172.16.180.4
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.11.243
+ infra_kvm03_deploy_address: 172.16.180.5
+ infra_kvm03_hostname: kvm03
+ infra_kvm04_control_address: 10.167.11.244
+ infra_kvm04_deploy_address: 172.16.180.6
+ infra_kvm04_hostname: kvm04
+ infra_kvm05_control_address: 10.167.11.245
+ infra_kvm05_deploy_address: 172.16.180.7
+ infra_kvm05_hostname: kvm05
+ infra_kvm06_control_address: 10.167.11.246
+ infra_kvm06_deploy_address: 172.16.180.8
+ infra_kvm06_hostname: kvm06
+ infra_kvm_vip_address: 10.167.11.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'True'
+ maas_deploy_address: 172.16.180.2
+ maas_deploy_cidr: 172.16.180.0/23
+ maas_deploy_gateway: 172.16.180.1
+ maas_deploy_range_end: 172.16.181.250
+ maas_deploy_range_start: 172.16.180.18
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ maas_machines: |
+ kvm01: # #cz7625-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:33:24:be"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:33:24:be"
+ mode: "static"
+ ip: "172.16.180.3"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:33:24:bf"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:01:3e"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:01:3f"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:f3:ce"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:f3:cf"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.59.227"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm02: # #cz7627-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:33:2d:6a"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:33:2d:6a"
+ mode: "static"
+ ip: "172.16.180.4"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:33:2d:6b"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:43:b8"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:43:b9"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:96:02"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:96:03"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.59.229"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm03: # #cz7756-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:69:a0:4c"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:69:a0:4c"
+ mode: "static"
+ ip: "172.16.180.5"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:69:a0:4d"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:c0:c2:14"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:c0:c2:15"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:09:c2"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:09:c3"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.225.88"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm04: # #cz7792-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:6c:83:5c"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:83:5c"
+ mode: "static"
+ ip: "172.16.180.6"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:6c:83:5d"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:7d:98"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:7d:99"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:03:de"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:03:df"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.225.112"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm05: # #cz7876-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:88:d6"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:88:d6"
+ mode: "static"
+ ip: "172.16.180.7"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:6c:88:d7"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:03:74"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:03:75"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:89:be"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:89:bf"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.225.208"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ kvm06: # #cz8073-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:df:ac"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:df:ac"
+ mode: "static"
+ ip: "172.16.180.8"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:df:ad"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:3a:f2"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:3a:f3"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:a6:4c"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:a6:4d"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.227.118"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw01: # #cz9039-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:d5:84"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:d5:84"
+ mode: "static"
+ ip: "172.16.180.9"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:d5:85"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:41:b0"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:41:b1"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:90:d2"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:90:d3"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.229.28"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw02: # #cz9048-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:d5:82"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:d5:82"
+ mode: "static"
+ ip: "172.16.180.10"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:d5:83"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:00:7c"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:00:7d"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:bc:88:8a"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:bc:88:8b"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.225.23"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw03: # #cz8159-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:6c:bc:f6"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6c:bc:f6"
+ mode: "static"
+ ip: "172.16.180.11"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:6c:bc:f7"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:9b:cc:32"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:9b:cc:33"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:c1:a5:04"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:c1:a5:05"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.58.9"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd001: # #cz9040-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:c9:02"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:c9:02"
+ mode: "static"
+ ip: "172.16.180.12"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:c9:03"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:aa:90"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:aa:91"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:0a:a4"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:0a:a5"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.58.246"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd002: # #cz9041-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:d5:60"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:d5:60"
+ mode: "static"
+ ip: "172.16.180.13"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:d5:61"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:04:2c"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:04:2d"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:01:f2"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:01:f3"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.58.243"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ osd003: # #cz9042-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:c9:3a"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:c9:3a"
+ mode: "static"
+ ip: "172.16.180.14"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:c9:3b"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:33:d7:10"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:33:d7:11"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:0b:5f:50"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:0b:5f:51"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.58.244"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # #cz9039-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:d6:aa"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:d6:aa"
+ mode: "static"
+ ip: "172.16.180.15"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:d6:ab"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:86:76"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:86:77"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:39:3c"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:39:3d"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.58.248"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # #cz9046-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:ce:30"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:ce:30"
+ mode: "static"
+ ip: "172.16.180.16"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:ce:31"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:e0:7d:e0"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:e0:7d:e1"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:0c:0e"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:0c:0f"
+ name: sten2
+ power_parameters:
+ power_address: "185.8.59.222"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp003: # #cz8061-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:aa:e0:ce"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:aa:e0:ce"
+ mode: "static"
+ ip: "172.16.180.17"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:aa:e0:cf"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:94:5e"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:94:5f"
+ name: ten2
+ sten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:87:e4"
+ name: sten1
+ sten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1f:87:e5"
+ name: sten2
+ power_parameters:
+ power_address: "5.43.225.228"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ mcp_version: proposed
+ mcp_docker_registry: docker-prod-local.docker.mirantis.net
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: bm-b300-cicd-queens-ovs-maas.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openstack_benchmark_node01_address: 10.167.11.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: golden
+ openstack_compute_count: '3'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
+ openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
+ openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
+ openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
+ openstack_control_address: 10.167.11.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.11.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.11.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.11.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.11.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.11.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.11.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.11.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_deploy_address: 172.16.180.9
+ openstack_gateway_node02_deploy_address: 172.16.180.10
+ openstack_gateway_node03_deploy_address: 172.16.180.11
+ openstack_gateway_node01_address: 10.167.11.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node01_tenant_address: 10.167.12.9
+ openstack_gateway_node02_address: 10.167.11.225
+ openstack_gateway_node02_tenant_address: 10.167.12.10
+ openstack_gateway_node03_address: 10.167.11.226
+ openstack_gateway_node03_tenant_address: 10.167.12.11
+ openstack_message_queue_address: 10.167.11.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.11.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.11.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.11.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'True'
+ openstack_neutron_vlan_aware_vms: 'True'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_hugepages_count: '16'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nfv_sriov_network: physnet2
+ openstack_nfv_sriov_numvfs: '7'
+ openstack_nfv_sriov_pf_nic: enp5s0f1
+ openstack_nova_cpu_pinning: 6,7,8,9,10,11
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.11.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.11.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.11.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.11.19
+ openstack_version: queens
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
+ salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
+ salt_master_address: 10.167.11.5
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.180.2
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.11.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.11.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.11.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.11.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.11.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.11.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.11.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.11.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.11.96
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.11.97
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.11.98
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.11.99
+ stacklight_telemetry_node03_hostname: mtr03
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.12.1
+ tenant_network_netmask: 255.255.254.0
+ tenant_network_subnet: 10.167.12.0/23
+ tenant_vlan: '2406'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ ceph_enabled: 'True'
+ ceph_version: "nautilus"
+ ceph_hyper_converged: "False"
+ rbd_monitoring_enabled: 'True'
+ rbd_pool_stats_gnocchi: 'True'
+ rbd_pool_stats_volumes: 'True'
+ rbd_pool_stats_images: 'True'
+ rbd_pool_stats_backups: 'True'
+ rbd_pool_stats_vms: 'True'
+ # Apply settings for ceph from contexts/ceph/nautilus-collocated-block-db.yml
+ ceph_osd_backend: "bluestore"
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
+ ceph_osd_dmcrypt: False
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+ ceph_public_network_allocation: storage
+ ceph_cluster_network: "10.167.11.0/24"
+ ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
+ ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
+ ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
+ ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
+ ceph_public_network: 10.167.11.0/24
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: ""
+ ceph_osd_block_partition_prefix: ''
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ ceph_mon_node01_address: "10.167.11.66"
+ ceph_mon_node01_hostname: "cmn01"
+ ceph_mon_node02_address: "10.167.11.67"
+ ceph_mon_node02_hostname: "cmn02"
+ ceph_mon_node03_address: "10.167.11.68"
+ ceph_mon_node03_hostname: "cmn03"
+ ceph_rgw_address: "10.167.11.75"
+ ceph_rgw_node01_address: "10.167.11.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ ceph_rgw_node02_address: "10.167.11.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ ceph_rgw_node03_address: "10.167.11.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ rsync_fernet_rotation: 'True'
+ compute_padding_with_zeros: True
+ designate_backend: powerdns
+ designate_enabled: 'True'
+ openstack_dns_node01_address: 10.167.11.113
+ openstack_dns_node02_address: 10.167.11.114
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_rabbitmq_standalone_mode: 'True'
+ openstack_internal_protocol: 'https'
+ tenant_telemetry_enabled: 'True'
+ gnocchi_aggregation_storage: ceph
+ openstack_telemetry_address: 10.167.11.83
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.11.84
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.11.85
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.11.86
+ openstack_telemetry_node03_hostname: mdb03
+ barbican_backend: dogtag
+ barbican_enabled: 'True'
+ barbican_integration_enabled: 'False'
+ openstack_barbican_address: 10.167.11.44
+ openstack_barbican_hostname: kmn
+ openstack_barbican_node01_address: 10.167.11.45
+ openstack_barbican_node01_hostname: kmn01
+ openstack_barbican_node02_address: 10.167.11.46
+ openstack_barbican_node02_hostname: kmn02
+ openstack_barbican_node03_address: 10.167.11.47
+ openstack_barbican_node03_hostname: kmn03
+ openstack_create_public_network: 'True'
+ openstack_public_neutron_subnet_gateway: 172.17.42.1
+ openstack_public_neutron_subnet_cidr: 172.17.42.0/26
+ openstack_public_neutron_subnet_allocation_start: 172.17.42.20
+ openstack_public_neutron_subnet_allocation_end: 172.17.42.55
+ backend_vlan: '2402'
+ manila_enabled: 'False'
+ openscap_enabled: 'True'
+ octavia_health_manager_node01_address: 192.168.1.10
+ octavia_health_manager_node02_address: 192.168.1.11
+ octavia_health_manager_node03_address: 192.168.1.12
+ octavia_manager_cluster: 'True'
+ octavia_amphora_topology: 'ACTIVE_STANDBY'
+ octavia_spare_amphora_pool_size: 1
+ octavia_lb_mgmt_cidr: 192.168.1.0/24
+ octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+ octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+ openstack_octavia_enabled: 'True'
+ secrets_encryption_enabled: 'True'
+ secrets_encryption_key_id: 'F5CB2ADC36159B03'
+ # Used on CI only.
+ secrets_encryption_private_key: ''
+ cinder_backup_engine: 'ceph'
+ cinder_ceph_backup_pool_name: 'backups'
+ openstack_keystone_security:
+ disable_user_account_days_inactive: 7
+ lockout_failure_attempts: 60
+ lockout_duration: 600
+ password_expires_days: 730
+ unique_last_password_count: 5
+ minimum_password_age: 0
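+ # In the regex below, the doubled '$$' presumably escapes a literal '$' end
+ # anchor from later variable interpolation (e.g. oslo.config).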
+ password_regex: "'^[a-zA-Z0-9~!@#%^&\\*_=+]{32,}$$'"
+ password_regex_description: "Your password can contain capital letters, lowercase letters, digits, and the symbols '~ ! @ # % ^ & * _ = +', and must be at least 32 characters long"
+ change_password_upon_first_use: False
+ stacklight_ssl_enabled: 'True'
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..eec4779
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml
@@ -0,0 +1,200 @@
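+# Environment model for the MAAS-managed physical nodes: each node is mapped
+# to its reclass storage name and its NICs are assigned bond/VLAN roles.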
+nodes:
+ cfg01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+
+ # Physical nodes
+ kvm01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+ ten1:
+ role: bond_baremetal_lacp
+ sten1:
+ role: bond_baremetal_lacp
+
+ kvm02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+ ten1:
+ role: bond_baremetal_lacp
+ sten1:
+ role: bond_baremetal_lacp
+
+ kvm03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+ ten1:
+ role: bond_baremetal_lacp
+ sten1:
+ role: bond_baremetal_lacp
+
+ kvm04.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node04
+ roles:
+ - infra_kvm_wo_gluster
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+ ten1:
+ role: bond_baremetal_lacp
+ sten1:
+ role: bond_baremetal_lacp
+
+ kvm05.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node05
+ roles:
+ - infra_kvm_wo_gluster
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+ ten1:
+ role: bond_baremetal_lacp
+ sten1:
+ role: bond_baremetal_lacp
+
+ kvm06.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: infra_kvm_node06
+ roles:
+ - infra_kvm_wo_gluster
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+ ten1:
+ role: bond_baremetal_lacp
+ sten1:
+ role: bond_baremetal_lacp
+
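+ # <<count>> is a template placeholder expanded into the individual node
+ # names (e.g. osd001..osd003, cmp001..cmp003) according to the node counts.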
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten2:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_ctl_lacp
+# role: bond0_ab_vlan_ceph_storage_backend
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten1:
+ role: bond_ctl_lacp
+ ten2:
+ role: bond_prv_lacp
+ sten1:
+ role: bond_ctl_lacp
+ sten2:
+ role: bond_prv_lacp
+
+ gtw01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten1:
+ role: bond_ctl_baremetal_lacp
+ ten2:
+ role: bond_prv_lacp
+ sten1:
+ role: bond_ctl_baremetal_lacp
+ sten2:
+ role: bond_prv_lacp
+
+ gtw02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten1:
+ role: bond_ctl_baremetal_lacp
+ ten2:
+ role: bond_prv_lacp
+ sten1:
+ role: bond_ctl_baremetal_lacp
+ sten2:
+ role: bond_prv_lacp
+
+ gtw03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node03
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten1:
+ role: bond_ctl_baremetal_lacp
+ ten2:
+ role: bond_prv_lacp
+ sten1:
+ role: bond_ctl_baremetal_lacp
+ sten2:
+ role: bond_prv_lacp
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
new file mode 100644
index 0000000..f9671d6
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
@@ -0,0 +1,448 @@
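+# Environment model for the virtualized control-plane (VCP) nodes: ens2 is
+# the DHCP-configured deploy interface, ens3 the control-plane interface.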
+nodes:
+ ctl01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ - features_ironic_baremetal_nodes
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mon03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mtr03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ log03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cmn03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ rgw03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mdb01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mdb02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ mdb03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - linux_system_codename_xenial
+ - openstack_telemetry
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dns01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dns02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ kmn01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_barbican_node01
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ kmn02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_barbican_node02
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ kmn03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_barbican_node03
+ roles:
+ - openstack_barbican
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ bmt01.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_baremetal_node01
+ roles:
+ - openstack_baremetal
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+ ens4:
+ role: single_baremetal
+
+ bmt02.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_baremetal_node02
+ roles:
+ - openstack_baremetal
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+ ens4:
+ role: single_baremetal
+
+ bmt03.bm-b300-cicd-queens-ovs-maas.local:
+ reclass_storage_name: openstack_baremetal_node03
+ roles:
+ - openstack_baremetal
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+ ens4:
+ role: single_baremetal
+
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt.yaml
new file mode 100644
index 0000000..877ee47
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt.yaml
@@ -0,0 +1,85 @@
+{% set HOSTNAME_CFG01='cfg01.bm-b300-cicd-queens-ovs-maas.local' %}
+{% set LAB_CONFIG_NAME='bm-b300-cicd-queens-ovs-maas' %}
+{% set DOMAIN_NAME='bm-b300-cicd-queens-ovs-maas.local' %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
+{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
+
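+# The steps below drive the bare-metal lifecycle through MAAS: commission the
+# machines, assign addresses and storage, then deploy them and wait for readiness.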
+- description: Wait for the salt-master to be ready after configdrive user-data
+ cmd: |
+ timeout 120 salt-call test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public key and register it in MAAS for access to the deployed machines
+ cmd: |
+ ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
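+ # 'mirantis' below is assumed to be the locally logged-in MAAS CLI profile.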
+ maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run commissioning of the BM nodes
+ cmd: |
+ salt-call maas.process_machines
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait for the machines to become ready
+ cmd: |
+ salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 7, delay: 5}
+ skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Assign drive partitions to machines
+ cmd: salt-call state.sls maas.machines.storage
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+ cmd: |
+ salt-key -y -D;
+ salt-call test.ping
+ sleep 5
+ # Check that the cfg01 is registered
+ salt-key | grep cfg01
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+ cmd: |
+ salt-call maas.deploy_machines;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Wait for the machines to be deployed
+ cmd: |
+ salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 6, delay: 5}
+ skip_fail: false
+
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/tempest_skip.list b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/tempest_skip.list
new file mode 100644
index 0000000..c9c567a
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/tempest_skip.list
@@ -0,0 +1,30 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
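+# A trailing '\b' (regex word boundary) pins the pattern to the exact test
+# name, so tests that merely share the prefix still run.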
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-33000 [OC][Infra] Instances don't have access to external net
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server\b
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_volume_shelved_or_offload_server\b
+# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume\b
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..2ebdf1f
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
@@ -0,0 +1,82 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ jenkins:qalab
+ expire: False
+
+packages:
+ - openjdk-8-jre-headless
+ - libyaml-dev
+ - libffi-dev
+ - libvirt-dev
+ - python-dev
+ - python-pip
+ - python-virtualenv
+ #- python-psycopg2
+ - pkg-config
+ - vlan
+ - bridge-utils
+ - ebtables
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+ - find /etc/network/interfaces.d/ -type f -delete
+ - kill $(pidof /sbin/dhclient) || /bin/true
+ - ip a flush dev ens3
+ - ip a flush dev ens4
+ - rm -f /var/run/network/ifstate.ens3
+ - rm -f /var/run/network/ifstate.ens4
+ - ip route delete default || /bin/true
+ - ifup ens3
+ - ifup ens4
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet static
+ address $management_static_ip
+ netmask 255.255.254.0
+ gateway $management_gw
+ dns-nameservers $dnsaddress
+
+ auto ens4
+ iface ens4 inet static
+ address $control_static_ip
+ netmask 255.255.254.0
+
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml
new file mode 100644
index 0000000..bb6338c
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml
@@ -0,0 +1,81 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
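+ # If the image uses LVM (vg0): grow the physical volume to the full disk,
+ # then re-apportion the logical volumes per image-layout.yml (see below).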
+ - if lvs vg0; then pvresize $(pvdisplay -C -S vg_name=vg0 -o pv_name --noheadings | tail -n1); fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo resolvconf -u
+ #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '65%VG'
+ home:
+ size: '1%VG'
+ var_log:
+ size: '10%VG'
+ var_log_audit:
+ size: '5%VG'
+ var_tmp:
+ size: '10%VG'
+ tmp:
+ size: '5%VG'
+ owner: root:root
+
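+# Let cloud-init grow the listed partitions to fill their devices on first boot.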
+growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ - '/dev/vdb3'
+ - '/dev/vdc3'
+ - '/dev/vdd3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
new file mode 100644
index 0000000..da17023
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
@@ -0,0 +1,143 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for bm-b300-cicd-queens-ovs-maas
+
+parameters:
+ instance_domain:
+ type: string
+ default: bm-b300-cicd-queens-ovs-maas.local
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+ key_pair:
+ type: string
+ cfg_flavor:
+ type: string
+ foundation_image:
+ type: string
+ foundation_flavor:
+ type: string
+ bm_availability_zone:
+ type: string
+ control_subnet_cidr:
+ type: string
+ default: "10.167.11.0/23"
+ tenant_subnet_cidr:
+ type: string
+ default: "10.167.12.0/23"
+ external_subnet_cidr:
+ type: string
+ default: "172.17.42.0/26"
+ management_subnet_cidr:
+ type: string
+ default: "172.16.180.0/23"
+ management_subnet_cfg01_ip:
+ type: string
+ default: 172.16.180.2
+ management_subnet_gateway_ip:
+ type: string
+ default: 172.16.180.1
+ management_subnet_pool_start:
+ type: string
+ default: 172.16.180.3
+ management_subnet_pool_end:
+ type: string
+ default: 172.16.180.61
+ salt_master_control_ip:
+ type: string
+ default: 10.167.11.5
+ deploy_empty_node:
+ type: boolean
+ default: False
+
+resources:
+ subnets:
+ type: MCP::Subnets
+ properties:
+ stack_name: { get_param: "OS::stack_name" }
+ env_name: { get_param: env_name }
+ management_net: 'system-phys-2401'
+ control_net: 'system-phys-2404'
+ tenant_net: 'system-phys-2406'
+ external_net: 'system-phys-2403'
+ control_subnet_cidr: { get_param: control_subnet_cidr }
+ tenant_subnet_cidr: { get_param: tenant_subnet_cidr }
+ external_subnet_cidr: { get_param: external_subnet_cidr }
+ management_subnet_cidr: { get_param: management_subnet_cidr }
+ management_subnet_gateway_ip: { get_param: management_subnet_gateway_ip }
+ management_subnet_pool_start: { get_param: management_subnet_pool_start }
+ management_subnet_pool_end: { get_param: management_subnet_pool_end }
+
+ #flavors:
+ # type: MCP::Flavors
+
+ cfg01_node:
+ type: MCP::MasterNode
+ depends_on: [subnets]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ cfg01_flavor: re.jenkins.slave.large
+ availability_zone: { get_param: bm_availability_zone }
+ management_net: 'system-phys-2401'
+ control_net: 'system-phys-2404'
+ tenant_net: 'system-phys-2406'
+ external_net: 'system-phys-2403'
+ salt_master_control_ip: { get_param: salt_master_control_ip }
+ management_subnet_cfg01_ip: { get_param: management_subnet_cfg01_ip }
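+ # list_join composes each static IP from the subnet prefix reported by the
+ # 'subnets' resource plus a fixed host octet.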
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [subnets, tenant_net_prefix] }, '5' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [subnets, external_net_prefix] }, '5' ]
+ instance_name: cfg01
+ instance_domain: {get_param: instance_domain}
+
+ foundation_node:
+ type: MCP::FoundationNode
+ depends_on: [subnets]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: foundation
+ availability_zone: { get_param: bm_availability_zone }
+ management_net: 'system-phys-2401'
+ control_net: 'system-phys-2404'
+ tenant_net: 'system-phys-2406'
+ external_net: 'system-phys-2403'
+ management_subnet_gateway_ip: { get_param: management_subnet_gateway_ip }
+ instance_image: { get_param: foundation_image }
+ instance_flavor: {get_param: foundation_flavor}
+ underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+ management_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [subnets, management_net_prefix] }, '251' ]
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [subnets, control_net_prefix] }, '6' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [subnets, tenant_net_prefix] }, '6' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [subnets, external_net_prefix] }, '6' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+outputs:
+ foundation_public_ip:
+ description: foundation node IP address (management)
+ value:
+ get_attr:
+ - foundation_node
+ - instance_address
+...
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 04adcc4..2499b71 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -21,9 +21,9 @@
control_vlan: '2404'
jenkins_pipelines_branch: 'release/2019.2.0'
- deploy_network_gateway: 172.16.164.1
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.164.0/26
+ deploy_network_gateway: 172.16.180.1
+ deploy_network_netmask: 255.255.254.0
+ deploy_network_subnet: 172.16.180.0/23
deployment_type: physical
dns_server01: 172.18.176.6
dns_server02: 172.18.224.6
@@ -32,22 +32,22 @@
infra_bond_mode: active-backup
infra_deploy_nic: eth0
infra_kvm01_control_address: 10.167.11.241
- infra_kvm01_deploy_address: 172.16.164.3
+ infra_kvm01_deploy_address: 172.16.180.3
infra_kvm01_hostname: kvm01
infra_kvm02_control_address: 10.167.11.242
- infra_kvm02_deploy_address: 172.16.164.4
+ infra_kvm02_deploy_address: 172.16.180.4
infra_kvm02_hostname: kvm02
infra_kvm03_control_address: 10.167.11.243
- infra_kvm03_deploy_address: 172.16.164.5
+ infra_kvm03_deploy_address: 172.16.180.5
infra_kvm03_hostname: kvm03
infra_kvm04_control_address: 10.167.11.244
- infra_kvm04_deploy_address: 172.16.164.6
+ infra_kvm04_deploy_address: 172.16.180.6
infra_kvm04_hostname: kvm04
infra_kvm05_control_address: 10.167.11.245
- infra_kvm05_deploy_address: 172.16.164.7
+ infra_kvm05_deploy_address: 172.16.180.7
infra_kvm05_hostname: kvm05
infra_kvm06_control_address: 10.167.11.246
- infra_kvm06_deploy_address: 172.16.164.8
+ infra_kvm06_deploy_address: 172.16.180.8
infra_kvm06_hostname: kvm06
infra_kvm_vip_address: 10.167.11.240
infra_primary_first_nic: eth1
@@ -55,11 +55,11 @@
kubernetes_enabled: 'False'
local_repositories: 'False'
maas_enabled: 'True'
- maas_deploy_address: 172.16.164.2
- maas_deploy_cidr: 172.16.164.0/26
- maas_deploy_gateway: 172.16.164.1
- maas_deploy_range_end: 172.16.164.61
- maas_deploy_range_start: 172.16.164.18
+ maas_deploy_address: 172.16.180.2
+ maas_deploy_cidr: 172.16.180.0/23
+ maas_deploy_gateway: 172.16.180.1
+ maas_deploy_range_end: 172.16.180.61
+ maas_deploy_range_start: 172.16.180.18
maas_dhcp_enabled: 'True'
maas_fabric_name: fabric-0
maas_hostname: cfg01
@@ -74,7 +74,7 @@
one1:
mac: "0c:c4:7a:33:24:be"
mode: "static"
- ip: "172.16.164.3"
+ ip: "172.16.180.3"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -111,7 +111,7 @@
one1:
mac: "0c:c4:7a:33:2d:6a"
mode: "static"
- ip: "172.16.164.4"
+ ip: "172.16.180.4"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -148,7 +148,7 @@
one1:
mac: "0c:c4:7a:69:a0:4c"
mode: "static"
- ip: "172.16.164.5"
+ ip: "172.16.180.5"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -186,7 +186,7 @@
one1:
mac: "0c:c4:7a:6c:83:5c"
mode: "static"
- ip: "172.16.164.6"
+ ip: "172.16.180.6"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -223,7 +223,7 @@
one1:
mac: "0c:c4:7a:6c:88:d6"
mode: "static"
- ip: "172.16.164.7"
+ ip: "172.16.180.7"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -260,7 +260,7 @@
one1:
mac: "0c:c4:7a:aa:df:ac"
mode: "static"
- ip: "172.16.164.8"
+ ip: "172.16.180.8"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -297,7 +297,7 @@
one1:
mac: "0c:c4:7a:aa:d5:84"
mode: "static"
- ip: "172.16.164.9"
+ ip: "172.16.180.9"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -334,7 +334,7 @@
one1:
mac: "0c:c4:7a:aa:d5:82"
mode: "static"
- ip: "172.16.164.10"
+ ip: "172.16.180.10"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -371,7 +371,7 @@
one1:
mac: "0c:c4:7a:6c:bc:f6"
mode: "static"
- ip: "172.16.164.11"
+ ip: "172.16.180.11"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -408,7 +408,7 @@
one1:
mac: "0c:c4:7a:aa:c9:02"
mode: "static"
- ip: "172.16.164.12"
+ ip: "172.16.180.12"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -445,7 +445,7 @@
one1:
mac: "0c:c4:7a:aa:d5:60"
mode: "static"
- ip: "172.16.164.13"
+ ip: "172.16.180.13"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -482,7 +482,7 @@
one1:
mac: "0c:c4:7a:aa:c9:3a"
mode: "static"
- ip: "172.16.164.14"
+ ip: "172.16.180.14"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -519,7 +519,7 @@
one1:
mac: "0c:c4:7a:aa:d6:aa"
mode: "static"
- ip: "172.16.164.15"
+ ip: "172.16.180.15"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -556,7 +556,7 @@
one1:
mac: "0c:c4:7a:aa:ce:30"
mode: "static"
- ip: "172.16.164.16"
+ ip: "172.16.180.16"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -597,7 +597,7 @@
one1:
mac: "0c:c4:7a:aa:e0:ce"
mode: "static"
- ip: "172.16.164.17"
+ ip: "172.16.180.17"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -639,7 +639,7 @@
openstack_compute_count: '3'
openstack_compute_rack01_hostname: cmp
openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
- openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+ openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
openstack_control_address: 10.167.11.10
@@ -659,9 +659,9 @@
openstack_database_node03_address: 10.167.11.53
openstack_database_node03_hostname: dbs03
openstack_enabled: 'True'
- openstack_gateway_node01_deploy_address: 172.16.164.9
- openstack_gateway_node02_deploy_address: 172.16.164.10
- openstack_gateway_node03_deploy_address: 172.16.164.11
+ openstack_gateway_node01_deploy_address: 172.16.180.9
+ openstack_gateway_node02_deploy_address: 172.16.180.10
+ openstack_gateway_node03_deploy_address: 172.16.180.11
openstack_gateway_node01_address: 10.167.11.224
openstack_gateway_node01_hostname: gtw01
openstack_gateway_node02_hostname: gtw02
@@ -708,7 +708,7 @@
salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
salt_master_address: 10.167.11.5
salt_master_hostname: cfg01
- salt_master_management_address: 172.16.164.2
+ salt_master_management_address: 172.16.180.2
stacklight_enabled: 'True'
stacklight_log_address: 10.167.11.60
stacklight_log_hostname: log
@@ -756,7 +756,7 @@
ceph_public_network_allocation: storage
ceph_cluster_network: "10.167.11.0/24"
ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
- ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+ ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
ceph_osd_data_disks: "/dev/sdb"
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
index c9fd9d6..617b1fa 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
@@ -68,7 +68,7 @@
auto ens3
iface ens3 inet static
address $management_static_ip
- netmask 255.255.255.192
+ netmask 255.255.254.0
gateway $management_gw
dns-nameservers $dnsaddress
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
index 0d37b6f..f40da29 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
@@ -33,19 +33,19 @@
default: "172.17.42.0/26"
management_subnet_cidr:
type: string
- default: "172.16.164.0/26"
+ default: "172.16.180.0/23"
management_subnet_cfg01_ip:
type: string
- default: 172.16.164.2
+ default: 172.16.180.2
management_subnet_gateway_ip:
type: string
- default: 172.16.164.1
+ default: 172.16.180.1
management_subnet_pool_start:
type: string
- default: 172.16.164.3
+ default: 172.16.180.3
management_subnet_pool_end:
type: string
- default: 172.16.164.61
+ default: 172.16.180.61
salt_master_control_ip:
type: string
default: 10.167.11.5
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index e2f4d14..81d3bf7 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -36,9 +36,9 @@
control_vlan: '2404'
jenkins_pipelines_branch: 'release/2019.2.0'
- deploy_network_gateway: 172.16.164.1
- deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.164.0/26
+ deploy_network_gateway: 172.16.180.1
+ deploy_network_netmask: 255.255.254.0
+ deploy_network_subnet: 172.16.180.0/23
deployment_type: physical
dns_server01: 172.18.176.6
dns_server02: 172.18.224.6
@@ -47,22 +47,22 @@
infra_bond_mode: active-backup
infra_deploy_nic: eth0
infra_kvm01_control_address: 10.167.11.241
- infra_kvm01_deploy_address: 172.16.164.3
+ infra_kvm01_deploy_address: 172.16.180.3
infra_kvm01_hostname: kvm01
infra_kvm02_control_address: 10.167.11.242
- infra_kvm02_deploy_address: 172.16.164.4
+ infra_kvm02_deploy_address: 172.16.180.4
infra_kvm02_hostname: kvm02
infra_kvm03_control_address: 10.167.11.243
- infra_kvm03_deploy_address: 172.16.164.5
+ infra_kvm03_deploy_address: 172.16.180.5
infra_kvm03_hostname: kvm03
infra_kvm04_control_address: 10.167.11.244
- infra_kvm04_deploy_address: 172.16.164.6
+ infra_kvm04_deploy_address: 172.16.180.6
infra_kvm04_hostname: kvm04
infra_kvm05_control_address: 10.167.11.245
- infra_kvm05_deploy_address: 172.16.164.7
+ infra_kvm05_deploy_address: 172.16.180.7
infra_kvm05_hostname: kvm05
infra_kvm06_control_address: 10.167.11.246
- infra_kvm06_deploy_address: 172.16.164.8
+ infra_kvm06_deploy_address: 172.16.180.8
infra_kvm06_hostname: kvm06
infra_kvm_vip_address: 10.167.11.240
infra_primary_first_nic: eth1
@@ -70,11 +70,11 @@
kubernetes_enabled: 'False'
local_repositories: 'False'
maas_enabled: 'True'
- maas_deploy_address: 172.16.164.2
- maas_deploy_cidr: 172.16.164.0/26
- maas_deploy_gateway: 172.16.164.1
- maas_deploy_range_end: 172.16.164.62
- maas_deploy_range_start: 172.16.164.18
+ maas_deploy_address: 172.16.180.2
+ maas_deploy_cidr: 172.16.180.0/23
+ maas_deploy_gateway: 172.16.180.1
+ maas_deploy_range_end: 172.16.180.62
+ maas_deploy_range_start: 172.16.180.18
maas_dhcp_enabled: 'True'
maas_fabric_name: fabric-0
maas_hostname: cfg01
@@ -89,7 +89,7 @@
one1:
mac: "0c:c4:7a:33:24:be"
mode: "static"
- ip: "172.16.164.3"
+ ip: "172.16.180.3"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -126,7 +126,7 @@
one1:
mac: "0c:c4:7a:33:2d:6a"
mode: "static"
- ip: "172.16.164.4"
+ ip: "172.16.180.4"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -163,7 +163,7 @@
one1:
mac: "0c:c4:7a:69:a0:4c"
mode: "static"
- ip: "172.16.164.5"
+ ip: "172.16.180.5"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -201,7 +201,7 @@
one1:
mac: "0c:c4:7a:6c:83:5c"
mode: "static"
- ip: "172.16.164.6"
+ ip: "172.16.180.6"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -238,7 +238,7 @@
one1:
mac: "0c:c4:7a:6c:88:d6"
mode: "static"
- ip: "172.16.164.7"
+ ip: "172.16.180.7"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -275,7 +275,7 @@
one1:
mac: "0c:c4:7a:aa:df:ac"
mode: "static"
- ip: "172.16.164.8"
+ ip: "172.16.180.8"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -312,7 +312,7 @@
one1:
mac: "0c:c4:7a:aa:d5:84"
mode: "static"
- ip: "172.16.164.9"
+ ip: "172.16.180.9"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -349,7 +349,7 @@
one1:
mac: "0c:c4:7a:aa:d5:82"
mode: "static"
- ip: "172.16.164.10"
+ ip: "172.16.180.10"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -386,7 +386,7 @@
one1:
mac: "0c:c4:7a:6c:bc:f6"
mode: "static"
- ip: "172.16.164.11"
+ ip: "172.16.180.11"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -423,7 +423,7 @@
one1:
mac: "0c:c4:7a:aa:c9:02"
mode: "static"
- ip: "172.16.164.12"
+ ip: "172.16.180.12"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -460,7 +460,7 @@
one1:
mac: "0c:c4:7a:aa:d5:60"
mode: "static"
- ip: "172.16.164.13"
+ ip: "172.16.180.13"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -497,7 +497,7 @@
one1:
mac: "0c:c4:7a:aa:c9:3a"
mode: "static"
- ip: "172.16.164.14"
+ ip: "172.16.180.14"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -534,7 +534,7 @@
one1:
mac: "0c:c4:7a:aa:d6:aa"
mode: "static"
- ip: "172.16.164.15"
+ ip: "172.16.180.15"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -571,7 +571,7 @@
one1:
mac: "0c:c4:7a:aa:ce:30"
mode: "static"
- ip: "172.16.164.16"
+ ip: "172.16.180.16"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -612,7 +612,7 @@
one1:
mac: "0c:c4:7a:aa:e0:ce"
mode: "static"
- ip: "172.16.164.17"
+ ip: "172.16.180.17"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -654,7 +654,7 @@
openstack_compute_count: '3'
openstack_compute_rack01_hostname: cmp
openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
- openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+ openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
openstack_control_address: 10.167.11.10
@@ -674,9 +674,9 @@
openstack_database_node03_address: 10.167.11.53
openstack_database_node03_hostname: dbs03
openstack_enabled: 'True'
- openstack_gateway_node01_deploy_address: 172.16.164.9
- openstack_gateway_node02_deploy_address: 172.16.164.10
- openstack_gateway_node03_deploy_address: 172.16.164.11
+ openstack_gateway_node01_deploy_address: 172.16.180.9
+ openstack_gateway_node02_deploy_address: 172.16.180.10
+ openstack_gateway_node03_deploy_address: 172.16.180.11
openstack_gateway_node01_address: 10.167.11.224
openstack_gateway_node01_hostname: gtw01
openstack_gateway_node02_hostname: gtw02
@@ -722,7 +722,7 @@
salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
salt_master_address: 10.167.11.5
salt_master_hostname: cfg01
- salt_master_management_address: 172.16.164.2
+ salt_master_management_address: 172.16.180.2
stacklight_enabled: 'True'
stacklight_log_address: 10.167.11.60
stacklight_log_hostname: log
@@ -779,7 +779,7 @@
ceph_public_network_allocation: storage
ceph_cluster_network: "10.167.11.0/24"
ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
- ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+ ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
@@ -816,6 +816,7 @@
openstack_mysql_x509_enabled: 'True'
rabbitmq_ssl_enabled: 'True'
openstack_rabbitmq_x509_enabled: 'True'
+ openstack_rabbitmq_standalone_mode: 'True'
openstack_internal_protocol: 'https'
tenant_telemetry_enabled: 'True'
gnocchi_aggregation_storage: ceph
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
index c9fd9d6..617b1fa 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
@@ -68,7 +68,7 @@
auto ens3
iface ens3 inet static
address $management_static_ip
- netmask 255.255.255.192
+ netmask 255.255.254.0
gateway $management_gw
dns-nameservers $dnsaddress
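
Note: the new netmask must stay in lockstep with the /26 -> /23 deploy CIDR change in the cookiecutter context above. A minimal sanity check with the stdlib ipaddress module (a sketch; addresses copied from the hunks above):

    import ipaddress

    # a /23 is netmask 255.255.254.0 with 510 usable hosts (a /26 had 62)
    net = ipaddress.ip_network("172.16.180.0/23")
    assert str(net.netmask) == "255.255.254.0"

    # the MAAS deploy range configured above must fall inside the new CIDR
    assert ipaddress.ip_address("172.16.180.18") in net
    assert ipaddress.ip_address("172.16.180.62") in net
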
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
index eac31bf..4306ae5 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
@@ -33,19 +33,19 @@
default: "172.17.42.0/26"
management_subnet_cidr:
type: string
- default: "172.16.164.0/26"
+ default: "172.16.180.0/23"
management_subnet_cfg01_ip:
type: string
- default: 172.16.164.2
+ default: 172.16.180.2
management_subnet_gateway_ip:
type: string
- default: 172.16.164.1
+ default: 172.16.180.1
management_subnet_pool_start:
type: string
- default: 172.16.164.3
+ default: 172.16.180.3
management_subnet_pool_end:
type: string
- default: 172.16.164.61
+ default: 172.16.180.61
salt_master_control_ip:
type: string
default: 10.167.11.5
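
Note: the renumbering to 172.16.180.0/23 touches many templates for these labs, and any file still carrying an old 172.16.164.x address would break deployment. A sweep like the following can confirm nothing was missed (a sketch; the search root is an assumption about the checkout layout):

    import pathlib
    import re

    # flag any template still referencing the retired deploy subnet
    stale = re.compile(r"172\.16\.164\.\d+")
    root = pathlib.Path("tcp_tests/templates")  # assumed checkout layout
    for pattern in ("*.yaml", "*.hot"):
        for path in root.rglob(pattern):
            for lineno, line in enumerate(path.read_text().splitlines(), 1):
                if stale.search(line):
                    print("{}:{}: {}".format(path, lineno, line.strip()))
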
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
index 3458dbc..abf1834 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
@@ -305,6 +305,7 @@
openstack_message_queue_node03_address: 10.167.11.43
openstack_message_queue_node03_hostname: msg03
openstack_network_engine: ovs
+ openstack_ovs_dvr_enabled: 'False'
openstack_neutron_qos: 'True'
openstack_neutron_vlan_aware_vms: 'True'
openstack_nfv_dpdk_enabled: 'False'
@@ -316,7 +317,6 @@
openstack_nfv_sriov_pf_nic: enp5s0f1
openstack_nova_cpu_pinning: 6,7,8,9,10,11
openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_ovs_dvr_enabled: 'True'
openstack_ovs_encapsulation_type: vxlan
openstack_ovs_encapsulation_vlan_range: 2402:2406
openstack_proxy_address: 10.167.11.80
@@ -360,6 +360,8 @@
stacklight_telemetry_node02_hostname: mtr02
stacklight_telemetry_node03_address: 10.167.11.99
stacklight_telemetry_node03_hostname: mtr03
+ opendaylight_control_node01_address: 10.167.11.220
+ opendaylight_control_node01_hostname: odl01
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.167.13.1
tenant_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml
index 5717e2e..b2e6985 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml
@@ -339,3 +339,14 @@
role: single_dhcp
ens3:
role: single_ctl
+
+ odl01.bm-e7-cicd-pike-odl-maas.local:
+ reclass_storage_name: opendaylight_control_node01
+ roles:
+ - opendaylight_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
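
Note: the appended odl01 entry mirrors the interface layout of the surrounding VCP nodes. A structural self-check with PyYAML (a sketch; the string abbreviates the entry above):

    import yaml

    node = yaml.safe_load("""
    odl01.bm-e7-cicd-pike-odl-maas.local:
      reclass_storage_name: opendaylight_control_node01
      roles: [opendaylight_control, linux_system_codename_xenial]
      interfaces:
        ens2: {role: single_dhcp}
        ens3: {role: single_ctl}
    """)
    entry = node["odl01.bm-e7-cicd-pike-odl-maas.local"]
    assert entry["interfaces"]["ens3"]["role"] == "single_ctl"
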
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 28d5916..026932b 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -190,7 +190,7 @@
devices:
- sdb
volume:
- cinder-volumes-pool:
+ cinder-vg-pool:
size: 800G
power_parameters:
power_address: "5.43.227.11"
@@ -243,7 +243,7 @@
devices:
- sdb
volume:
- cinder-volumes-pool:
+ cinder-vg-pool:
size: 800G
power_parameters:
power_address: "5.43.227.19"
@@ -263,10 +263,10 @@
openstack_cluster_size: compact
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.16
+ openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.254
openstack_compute_deploy_address_ranges: 172.16.162.73-172.16.162.74
- openstack_compute_tenant_address_ranges: 10.167.13.15-10.167.13.16
- openstack_compute_backend_address_ranges: 10.167.13.15-10.167.13.16
+ openstack_compute_tenant_address_ranges: 10.167.13.15-10.167.13.254
+ openstack_compute_backend_address_ranges: 10.167.13.15-10.167.13.254
openstack_control_address: 10.167.11.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.11.11
@@ -305,6 +305,7 @@
openstack_message_queue_node03_address: 10.167.11.43
openstack_message_queue_node03_hostname: msg03
openstack_network_engine: ovs
+ openstack_ovs_dvr_enabled: 'False'
openstack_neutron_qos: 'True'
openstack_neutron_vlan_aware_vms: 'True'
openstack_nfv_dpdk_enabled: 'False'
@@ -316,7 +317,6 @@
openstack_nfv_sriov_pf_nic: enp5s0f1
openstack_nova_cpu_pinning: 6,7,8,9,10,11
openstack_nova_compute_reserved_host_memory_mb: '900'
- openstack_ovs_dvr_enabled: 'True'
openstack_ovs_encapsulation_type: vxlan
openstack_ovs_encapsulation_vlan_range: 2402:2406
openstack_proxy_address: 10.167.11.80
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-b300-cicd-queens-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-b300-cicd-queens-ovs-maas.yaml
new file mode 100644
index 0000000..73b5ad6
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-b300-cicd-queens-ovs-maas.yaml
@@ -0,0 +1,72 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for other salt model repository parameters
+{% set LAB_CONFIG_NAME = 'bm-b300-cicd-queens-ovs-maas' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-b300-cicd-queens-ovs-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','salt-context-cookiecutter-openstack_ovs.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2404') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+{%- set CISCO_PASS = os_env('CISCO_PASS', 'cisco_pass') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+
+- description: Temporary WR to align bridge names with the environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-baremetal/br\_baremetal/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+ salt '*' saltutil.refresh_pillar;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+ cmd: |
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+ sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+ sed -i 's/==CISCO_PASS==/${_param:cisco_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Add user/password for IPMI access"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+ reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+ reclass-tools add-key parameters._param.cisco_password {{ CISCO_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
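
Note: the two command blocks above do the same job from two sides: sed rewrites the ==IPMI_USER==/==IPMI_PASS==/==CISCO_PASS== placeholders into reclass parameter references, and reclass-tools then defines those parameters. A minimal Python equivalent of the placeholder pass (a sketch; the file path is illustrative):

    # map placeholder markers to the reclass parameters that replace them
    replacements = {
        "==IPMI_USER==": "${_param:power_user}",
        "==IPMI_PASS==": "${_param:power_password}",
        "==CISCO_PASS==": "${_param:cisco_password}",
    }

    path = "maas_machines.yml"  # illustrative target file
    with open(path) as f:
        text = f.read()
    for marker, value in replacements.items():
        text = text.replace(marker, value)
    with open(path, "w") as f:
        f.write(text)
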
diff --git a/tcp_tests/templates/e_lab_engineer b/tcp_tests/templates/e_lab_engineer
index 91bcf2f..1c1a7bf 100644
--- a/tcp_tests/templates/e_lab_engineer
+++ b/tcp_tests/templates/e_lab_engineer
@@ -1,5 +1,4 @@
176.74.217.64
-185.8.59.228
5.43.225.89
5.43.227.11
5.43.227.19
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 50653c2..71c6fe5 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -136,7 +136,7 @@
one1:
mac: "0c:c4:7a:33:24:be"
mode: "static"
- ip: "172.16.164.3"
+ ip: "172.16.180.3"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -173,7 +173,7 @@
one1:
mac: "0c:c4:7a:33:2d:6a"
mode: "static"
- ip: "172.16.164.4"
+ ip: "172.16.180.4"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -210,7 +210,7 @@
one1:
mac: "0c:c4:7a:69:a0:4c"
mode: "static"
- ip: "172.16.164.5"
+ ip: "172.16.180.5"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -248,7 +248,7 @@
one1:
mac: "0c:c4:7a:6c:83:5c"
mode: "static"
- ip: "172.16.164.6"
+ ip: "172.16.180.6"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -285,7 +285,7 @@
one1:
mac: "0c:c4:7a:6c:88:d6"
mode: "static"
- ip: "172.16.164.7"
+ ip: "172.16.180.7"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -322,7 +322,7 @@
one1:
mac: "0c:c4:7a:aa:df:ac"
mode: "static"
- ip: "172.16.164.8"
+ ip: "172.16.180.8"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -359,7 +359,7 @@
# one1:
# mac: "0c:c4:7a:aa:d5:84"
# mode: "static"
- # ip: "172.16.164.9"
+ # ip: "172.16.180.9"
# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
# gateway: ${_param:deploy_network_gateway}
# name: one1
@@ -396,7 +396,7 @@
# one1:
# mac: "0c:c4:7a:aa:d5:82"
# mode: "static"
- # ip: "172.16.164.10"
+ # ip: "172.16.180.10"
# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
# gateway: ${_param:deploy_network_gateway}
# name: one1
@@ -433,7 +433,7 @@
# one1:
# mac: "0c:c4:7a:6c:bc:f6"
# mode: "static"
- # ip: "172.16.164.11"
+ # ip: "172.16.180.11"
# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
# gateway: ${_param:deploy_network_gateway}
# name: one1
@@ -470,7 +470,7 @@
one1:
mac: "0c:c4:7a:aa:c9:02"
mode: "static"
- ip: "172.16.164.12"
+ ip: "172.16.180.12"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -507,7 +507,7 @@
one1:
mac: "0c:c4:7a:aa:d5:60"
mode: "static"
- ip: "172.16.164.13"
+ ip: "172.16.180.13"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -544,7 +544,7 @@
one1:
mac: "0c:c4:7a:aa:c9:3a"
mode: "static"
- ip: "172.16.164.14"
+ ip: "172.16.180.14"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -581,7 +581,7 @@
one1:
mac: "0c:c4:7a:aa:d6:aa"
mode: "static"
- ip: "172.16.164.15"
+ ip: "172.16.180.15"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -618,7 +618,7 @@
one1:
mac: "0c:c4:7a:aa:ce:30"
mode: "static"
- ip: "172.16.164.16"
+ ip: "172.16.180.16"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -659,7 +659,7 @@
one1:
mac: "0c:c4:7a:aa:e0:ce"
mode: "static"
- ip: "172.16.164.17"
+ ip: "172.16.180.17"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
index 561c8a1..21d8fb4 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
@@ -76,6 +76,7 @@
retry: {count: 6, delay: 5}
skip_fail: false
-{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
+{{ SHARED_WORKAROUNDS.DELETE_BOND0() }}
{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
index e11335f..8b01d59 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
@@ -35,19 +35,19 @@
default: "10.9.0.0/24"
management_subnet_cidr:
type: string
- default: "172.16.164.0/26"
+ default: "172.16.180.0/23"
management_subnet_cfg01_ip:
type: string
- default: 172.16.164.2
+ default: 172.16.180.2
management_subnet_gateway_ip:
type: string
- default: 172.16.164.1
+ default: 172.16.180.1
management_subnet_pool_start:
type: string
- default: 172.16.164.3
+ default: 172.16.180.3
management_subnet_pool_end:
type: string
- default: 172.16.164.61
+ default: 172.16.180.61
salt_master_control_ip:
type: string
default: 10.6.0.15
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
index d08c4c8..adac30b 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -136,7 +136,7 @@
one1:
mac: "0c:c4:7a:33:24:be"
mode: "static"
- ip: "172.16.164.3"
+ ip: "172.16.180.3"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -173,7 +173,7 @@
one1:
mac: "0c:c4:7a:33:2d:6a"
mode: "static"
- ip: "172.16.164.4"
+ ip: "172.16.180.4"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -210,7 +210,7 @@
one1:
mac: "0c:c4:7a:69:a0:4c"
mode: "static"
- ip: "172.16.164.5"
+ ip: "172.16.180.5"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -248,7 +248,7 @@
one1:
mac: "0c:c4:7a:6c:83:5c"
mode: "static"
- ip: "172.16.164.6"
+ ip: "172.16.180.6"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -285,7 +285,7 @@
one1:
mac: "0c:c4:7a:6c:88:d6"
mode: "static"
- ip: "172.16.164.7"
+ ip: "172.16.180.7"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -322,7 +322,7 @@
one1:
mac: "0c:c4:7a:aa:df:ac"
mode: "static"
- ip: "172.16.164.8"
+ ip: "172.16.180.8"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -359,7 +359,7 @@
# one1:
# mac: "0c:c4:7a:aa:d5:84"
# mode: "static"
- # ip: "172.16.164.9"
+ # ip: "172.16.180.9"
# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
# gateway: ${_param:deploy_network_gateway}
# name: one1
@@ -396,7 +396,7 @@
# one1:
# mac: "0c:c4:7a:aa:d5:82"
# mode: "static"
- # ip: "172.16.164.10"
+ # ip: "172.16.180.10"
# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
# gateway: ${_param:deploy_network_gateway}
# name: one1
@@ -433,7 +433,7 @@
# one1:
# mac: "0c:c4:7a:6c:bc:f6"
# mode: "static"
- # ip: "172.16.164.11"
+ # ip: "172.16.180.11"
# subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
# gateway: ${_param:deploy_network_gateway}
# name: one1
@@ -470,7 +470,7 @@
one1:
mac: "0c:c4:7a:aa:c9:02"
mode: "static"
- ip: "172.16.164.12"
+ ip: "172.16.180.12"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -507,7 +507,7 @@
one1:
mac: "0c:c4:7a:aa:d5:60"
mode: "static"
- ip: "172.16.164.13"
+ ip: "172.16.180.13"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -544,7 +544,7 @@
one1:
mac: "0c:c4:7a:aa:c9:3a"
mode: "static"
- ip: "172.16.164.14"
+ ip: "172.16.180.14"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -581,7 +581,7 @@
one1:
mac: "0c:c4:7a:aa:d6:aa"
mode: "static"
- ip: "172.16.164.15"
+ ip: "172.16.180.15"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -618,7 +618,7 @@
one1:
mac: "0c:c4:7a:aa:ce:30"
mode: "static"
- ip: "172.16.164.16"
+ ip: "172.16.180.16"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -659,7 +659,7 @@
one1:
mac: "0c:c4:7a:aa:e0:ce"
mode: "static"
- ip: "172.16.164.17"
+ ip: "172.16.180.17"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
index df263e9..43e40d4 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
@@ -76,6 +76,7 @@
retry: {count: 6, delay: 5}
skip_fail: false
-{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
+{{ SHARED_WORKAROUNDS.DELETE_BOND0() }}
{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
index b02b758..8bc2a84 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
@@ -35,19 +35,19 @@
default: "10.9.0.0/24"
management_subnet_cidr:
type: string
- default: "172.16.164.0/26"
+ default: "172.16.180.0/23"
management_subnet_cfg01_ip:
type: string
- default: 172.16.164.2
+ default: 172.16.180.2
management_subnet_gateway_ip:
type: string
- default: 172.16.164.1
+ default: 172.16.180.1
management_subnet_pool_start:
type: string
- default: 172.16.164.3
+ default: 172.16.180.3
management_subnet_pool_end:
type: string
- default: 172.16.164.61
+ default: 172.16.180.61
salt_master_control_ip:
type: string
default: 10.6.0.15
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
index c7f6ea6..6971e77 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
runcmd:
# Create swap
- - fallocate -l 16G /swapfile
+ - fallocate -l 2G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
index 0c92b47..928d76f 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
@@ -127,7 +127,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -932,7 +932,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
runcmd:
# Create swap
- - fallocate -l 16G /swapfile
+ - fallocate -l 2G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
index 1d24327..6319401 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
runcmd:
# Create swap
- - fallocate -l 16G /swapfile
+ - fallocate -l 2G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
index 8fc50af..726ee09 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
@@ -127,7 +127,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -932,7 +932,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
runcmd:
# Create swap
- - fallocate -l 16G /swapfile
+ - fallocate -l 2G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
index 95fc69e..07f30aa 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
index 2a89da1..9d9d435 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
@@ -66,4 +66,29 @@
tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
+# PROD-25940 for contrail only
+tempest.api.network.admin.test_quotas.QuotasTest.test_quotas\b
+
+# PROD-33719 for contrail only
+tempest.api.network.admin.test_routers.RoutersAdminTest.test_update_router_set_gateway
+tempest.api.network.admin.test_routers.RoutersIpV6AdminTest.test_update_router_set_gateway
+
+# PROD-25128 [OC 4.x][Tempest] Parameter "strict_compliance" is False by default
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable
+
+# PROD-21671 [OpenContrail 4.0] Unable to update "subnet-id" for port (test_update_port_with_security_group_and_extra_attributes)
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+
+# PROD-31179 Several tempest tests are failed on contrail configuration on checks for floating ip connectivity
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state
+
+# PROD-25586 [OC4.x][Tempest] Heat can't update port's mac address
+heat_tempest_plugin.tests.functional.test_create_update_neutron_port.UpdatePortTest.test_update_with_mac_address
+
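
Note: skip-list entries are applied as regular expressions, which is why the PROD-25940 entry ends in \b: the word boundary keeps test_quotas from also skipping longer names that merely share the prefix. A sketch of the assumed matching semantics:

    import re

    # '\b' stops the skip at a word boundary, so parametrized ids still match
    # but differently-named tests with the same prefix do not
    pattern = re.compile(r"test_quotas\b")
    assert pattern.search("QuotasTest.test_quotas[id-123]")
    assert not pattern.search("QuotasTest.test_quotas_extended")
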
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
runcmd:
# Create swap
- - fallocate -l 16G /swapfile
+ - fallocate -l 2G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
index d9da3dd..4c12277 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
@@ -127,7 +127,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -932,7 +932,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index 83ea6b3..6e575b1 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -293,6 +293,7 @@
galera_ssl_enabled: 'True'
openstack_mysql_x509_enabled: 'True'
rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_standalone_mode: 'True'
openstack_rabbitmq_x509_enabled: 'True'
openstack_internal_protocol: 'https'
openstack_create_public_network: 'True'
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml
index bb6338c..d998d47 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml
@@ -36,7 +36,7 @@
#- sudo route add default gw {gateway} {interface_name}
# Create swap
- - fallocate -l 16G /swapfile
+ - fallocate -l 2G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
index 88997d7..a82aaf0 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
env_name: { get_param: env_name }
mcp_version: { get_param: mcp_version }
cfg01_flavor: { get_param: cfg_flavor }
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
instance_name: foundation
instance_image: { get_param: foundation_image }
instance_flavor: {get_param: foundation_flavor}
- availability_zone: { get_param: bm_availability_zone }
+ availability_zone: { get_param: vm_availability_zone }
management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 5621b02..200ae8d 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -454,6 +454,10 @@
chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
eval $(ssh-agent)
ssh-add /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+
+ git config --global user.name {{ HOSTNAME_CFG01 }}
+ git config --global user.email {{ HOSTNAME_CFG01 }}@example.com
+
export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
{%- if COOKIECUTTER_REF_CHANGE != '' %}
@@ -544,13 +548,8 @@
cmd: |
set -e;
set -x;
- {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
- pushd /srv/salt/reclass/classes/system/ && \
- {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
- git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
- {%- endfor %}
- popd;
- {%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
+
+ {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
pushd /srv/salt/reclass/classes/system/
git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
popd;
@@ -561,6 +560,14 @@
popd;
{%- endif %}
+ {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
+ pushd /srv/salt/reclass/classes/system/ && \
+ {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
+ git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
+ {%- endfor %}
+ popd;
+ {%- endif %}
+
{%- if IS_CONTRAIL_LAB %}
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
# vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
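
Note: the reordering above makes the pinned SALT_MODELS_SYSTEM_COMMIT checkout happen first, so SALT_MODELS_SYSTEM_REF_CHANGE cherry-picks now land on top of the pinned commit instead of being discarded by a later checkout. The resulting order, sketched as plain git plumbing (the helper is illustrative, not part of the change):

    import subprocess

    def prepare_system_model(repo_dir, repo_url, commit=None, ref_changes=()):
        """Check out the pinned commit first, then cherry-pick review refs."""
        if commit:
            subprocess.check_call(["git", "checkout", commit], cwd=repo_dir)
        for ref in ref_changes:  # e.g. Gerrit refs/changes/... refs
            subprocess.check_call(["git", "fetch", repo_url, ref], cwd=repo_dir)
            subprocess.check_call(["git", "cherry-pick", "FETCH_HEAD"],
                                  cwd=repo_dir)
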
diff --git a/tcp_tests/templates/shared-workarounds.yaml b/tcp_tests/templates/shared-workarounds.yaml
index 5e508fd..8f2d67b 100644
--- a/tcp_tests/templates/shared-workarounds.yaml
+++ b/tcp_tests/templates/shared-workarounds.yaml
@@ -29,4 +29,19 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: true
+{%- endmacro %}
+
+
+{%- macro DELETE_BOND0() %}
+{######################################}
+- description: |
+  Delete empty bond0 interface on kvm and osd nodes. Related-Prod:PROD-35758
+
+ cmd: |
+ set -x;
+ salt -C 'osd* or kvm*' cmd.run 'ip link delete bond0'
+ salt -C 'osd* or kvm*' file.write /etc/modprobe.d/bonding.conf "options bonding max_bonds=0"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
{%- endmacro %}
\ No newline at end of file
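
Note: the DELETE_BOND0 macro first removes the stray interface, then writes max_bonds=0 so the bonding module cannot recreate bond0 on its next load. A hypothetical post-check from the salt master (targeting copied from the macro above):

    import subprocess

    # after DELETE_BOND0 runs, 'ip link show bond0' should fail on every
    # kvm/osd minion ("does not exist" in the per-minion output)
    result = subprocess.run(
        ["salt", "-C", "osd* or kvm*", "cmd.run", "ip link show bond0"],
        capture_output=True, text=True)
    print(result.stdout)
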
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 2ea36cf..0dabd4d 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -83,8 +83,7 @@
def pytest_addoption(parser):
- parser.addoption("--dont-switch-to-proposed",
- action="store_true",
- help="Skips switching Jenkins on cluster-under-test to "
- "the proposed branch before the applying "
- "the MCP updates")
+ parser.addoption("--update-to-tag",
+ action="store",
+ default=None,
+ help="For mcp_update test")
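
Note: the new option replaces the boolean flag with a value-carrying one; a test reads it back through pytest's standard option API. The fixture below is an assumption for illustration, not part of the change:

    import pytest

    @pytest.fixture(scope="session")
    def update_to_tag(request):
        # e.g. 'pytest --update-to-tag=2019.2.17 ...' -> "2019.2.17";
        # returns None when the option is not supplied
        return request.config.getoption("--update-to-tag")
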
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index 6f2fb52..2a19258 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -1,70 +1,12 @@
+import time
+
import pytest
from tcp_tests import logger
+from string import Template
LOG = logger.logger
-xtra_network_interface = """
-parameters:
- _param:
- linux_network_interfaces:
- br_ctl:
- address: ${_param:single_address}
- enabled: True
- name_servers:
- - ${_param:dns_server01}
- - ${_param:dns_server02}
- netmask: ${_param:control_network_netmask}
- proto: static
- require_interfaces: ['ens4']
- type: bridge
- use_interfaces: ['ens4']
- ens3:
- enabled: True
- name: ens3
- proto: dhcp
- type: eth
- ens4:
- enabled: True
- ipflush_onchange: True
- name: ens4
- proto: manual
- type: eth
-"""
-
-add_osd_ceph_init_yml = """
-parameters:
- _param:
- ceph_osd_node04_hostname: xtra
- ceph_osd_node04_address: 10.6.0.205
- ceph_osd_system_codename: xenial
- linux:
- network:
- host:
- xtra:
- address: ${_param:ceph_osd_node04_address}
- names:
- - ${_param:ceph_osd_node04_hostname}
- - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
- """
-
-add_osd_config_init_yml = """
-parameters:
- reclass:
- storage:
- node:
- ceph_osd_node04:
- name: ${_param:ceph_osd_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.ceph.osd
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:ceph_osd_system_codename}
- single_address: ${_param:ceph_osd_node04_address}
- ceph_crush_parent: rack02
-"""
-
@pytest.fixture(scope='session')
def add_xtra_node_to_salt(salt_actions, underlay_actions,
@@ -80,14 +22,7 @@
cfg_node = [node['node_name'] for node in config.underlay.ssh
if 'salt_master' in node.get('roles')][0]
- salt_actions.enforce_state("*", "reclass")
- reclass_actions.add_class(
- "environment.heat-cicd-queens-dvr-sl.linux_network_interface",
- short_path="../nodes/_generated/xtra.*.yml")
- reclass_actions.add_class("environment.heat-cicd-queens-dvr-sl.overrides",
- short_path="../nodes/_generated/xtra.*.yml")
- reclass_actions.merge_context(yaml_context=xtra_network_interface,
- short_path="../nodes/_generated/xtra.*.yml")
+ # salt_actions.enforce_state("I@salt:master", "reclass")
underlay_actions.check_call(
"salt-key -a {node} --include-all -y".format(node=xtra_node),
@@ -98,9 +33,9 @@
"systemctl restart salt-minion",
node_name=xtra_node,
raise_on_err=False)
- salt_actions.enforce_state("I@salt:master", "reclass")
- salt_actions.enforce_state("xtra*", "linux")
- salt_actions.enforce_state("xtra*", "openssh")
+ time.sleep(15)
+ # salt_actions.enforce_state("xtra*", "linux")
+ # salt_actions.enforce_state("xtra*", "openssh")
yield
@@ -114,12 +49,71 @@
@pytest.fixture(scope='session')
def wa_prod36167(reclass_actions):
reclass_actions.delete_class("system.salt.control.virt",
- "classes/cluster/*/infra/kvm.yml")
+ "cluster/*/infra/kvm.yml")
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+ "wa_prod36167")
class TestCephOsd(object):
+ add_osd_ceph_init_yml = """
+ parameters:
+ _param:
+ ceph_osd_node04_hostname: xtra
+ ceph_osd_node04_address: 10.6.0.205
+ ceph_osd_system_codename: xenial
+ linux:
+ network:
+ host:
+ xtra:
+ address: ${_param:ceph_osd_node04_address}
+ names:
+ - ${_param:ceph_osd_node04_hostname}
+ - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
+ """
+
+ add_osd_config_init_yml = """
+ parameters:
+ reclass:
+ storage:
+ node:
+ ceph_osd_node04:
+ name: ${_param:ceph_osd_node04_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.ceph.osd
+ - environment.heat-cicd-queens-dvr-sl.linux_network_interface
+ - environment.heat-cicd-queens-dvr-sl.overrides
+ params:
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:ceph_osd_system_codename}
+ single_address: ${_param:ceph_osd_node04_address}
+ ceph_crush_parent: rack02
+ linux_network_interfaces:
+ br_ctl:
+ address: ${_param:ceph_osd_node04_address}
+ enabled: True
+ name_servers:
+ - ${_param:dns_server01}
+ - ${_param:dns_server02}
+ netmask: ${_param:control_network_netmask}
+ proto: static
+ require_interfaces: ['ens4']
+ type: bridge
+ use_interfaces: ['ens4']
+ ens3:
+ enabled: True
+ name: ens3
+ proto: dhcp
+ type: eth
+ ens4:
+ enabled: True
+ ipflush_onchange: True
+ name: ens4
+ proto: manual
+ type: eth
+ """
+
@pytest.fixture
def describe_node_in_reclass(self,
reclass_actions,
@@ -127,14 +121,17 @@
LOG.info("Executing pytest SETUP "
"from describe_node_in_reclass fixture")
reclass = reclass_actions
+
# ---- cluster/*/ceph/init.yml ---------------
- reclass.merge_context(yaml_context=add_osd_ceph_init_yml,
+ reclass.merge_context(yaml_context=self.add_osd_ceph_init_yml,
short_path="cluster/*/ceph/init.yml")
# ------- cluster/infra/config/init.yml -----------
- reclass.merge_context(yaml_context=add_osd_config_init_yml,
- short_path="cluster/*/infra/config/init.yml")
- salt_actions.run_state("*", "saltutil.refresh_pillar")
+ reclass.merge_context(yaml_context=build_node_config('osd'),
+ short_path="cluster/*/infra/config/nodes.yml")
+
+ # salt_actions.run_state("*", "saltutil.refresh_pillar")
+ # salt_actions.enforce_state("I@salt:master", "reclass")
@pytest.fixture
def remove_node_from_reclass(self,
@@ -143,7 +140,7 @@
reclass.delete_key(
key="parameters.reclass.storage.node.ceph_osd_node04",
- short_path="cluster/*/infra/config/init.yml"
+ short_path="cluster/*/infra/config/nodes.yml"
)
reclass.delete_key(
key="parameters.linux.network.host.xtra",
@@ -211,76 +208,36 @@
assert job_result == 'SUCCESS', job_description
-add_mon_ceph_init_yml = """
-parameters:
- _param:
- ceph_mon_node04_hostname: xtra
- ceph_mon_node04_address: 10.6.0.205
- ceph_mon_node04_ceph_public_address: 10.166.49.209
- ceph_mon_node04_ceph_backup_hour: 4
- ceph_mon_node04_ceph_backup_minute: 0
- linux:
- network:
- host:
- xtra:
- address: ${_param:ceph_mon_node04_address}
- names:
- - ${_param:ceph_mon_node04_hostname}
- - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
-"""
-
-add_mon_ceph_common_yml = """
-parameters:
- ceph:
- common:
- members:
- - name: ${_param:ceph_mon_node04_hostname}
- host: ${_param:ceph_mon_node04_address}
-"""
-
-add_mon_config_node_yml = """
-parameters:
- reclass:
- storage:
- node:
- ceph_mon_node04:
- name: ${_param:ceph_mon_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.ceph.mon
- params:
- ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
- ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
- ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:ceph_mon_system_codename}
- single_address: ${_param:ceph_mon_node04_address}
- keepalived_vip_priority: 104
-""" # noqa: E501
-
-add_mon_infra_kvm_yml = """
-parameters:
- salt:
- control:
- size:
- ceph.mon:
- cpu: 8
- ram: 16384
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- node:
- cmn04:
- name: ${_param:ceph_mon_node04_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: ceph.mon
-""" # noqa: E501
-
-
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+ "wa_prod36167")
class TestCephMon(object):
+ add_mon_ceph_init_yml = """
+ parameters:
+ _param:
+ ceph_mon_node04_hostname: xtra
+ ceph_mon_node04_address: 10.6.0.205
+ ceph_mon_node04_ceph_public_address: 10.166.49.209
+ ceph_mon_node04_ceph_backup_hour: 4
+ ceph_mon_node04_ceph_backup_minute: 0
+ linux:
+ network:
+ host:
+ xtra:
+ address: ${_param:ceph_mon_node04_address}
+ names:
+ - ${_param:ceph_mon_node04_hostname}
+ - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
+ """
+
+ add_mon_ceph_common_yml = """
+ parameters:
+ ceph:
+ common:
+ members:
+ - name: ${_param:ceph_mon_node04_hostname}
+ host: ${_param:ceph_mon_node04_address}
+ """
+
@pytest.fixture
def describe_node_in_reclass(self,
reclass_actions, salt_actions):
@@ -288,18 +245,20 @@
"from describe_node_in_reclass fixture")
reclass = reclass_actions
# ---- cluster/*/ceph/init.yml --------------
- reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
+ reclass.merge_context(yaml_context=self.add_mon_ceph_init_yml,
short_path="cluster/*/ceph/init.yml")
# ------- cluster/infra/config/init.yml -----------
- reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
+ reclass.merge_context(yaml_context=self.add_mon_ceph_common_yml,
short_path="cluster/*/ceph/common.yml")
- reclass.merge_context(yaml_context=add_mon_config_node_yml,
+ reclass.merge_context(yaml_context=build_node_config('mon'),
short_path="cluster/*/infra/config/nodes.yml")
# ------- define settings for new mon node in KVM cluster -----------
- reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
- short_path="cluster/*/infra/kvm.yml")
+        # Commented out because we add an already deployed node,
+        # not a new VM
+ # reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
+ # short_path="cluster/*/infra/kvm.yml")
salt_actions.run_state("*", "saltutil.refresh_pillar")
@@ -311,7 +270,7 @@
reclass = reclass_actions
reclass.delete_key(
key="parameters.reclass.storage.node.ceph_mon_node04",
- short_path="cluster/*/infra/config/init.yml")
+ short_path="cluster/*/infra/config/nodes.yml")
reclass.delete_key(
key="parameters.salt.control.cluster.internal.node.cmn04",
short_path="cluster/*/infra/kvm.yml"
@@ -360,67 +319,48 @@
assert job_result == 'SUCCESS', job_description
-add_rgw_ceph_init_yml = """
-parameters:
- _param:
- ceph_rgw_node04_hostname: xtra
- ceph_rgw_node04_address: 10.6.0.205
- ceph_rgw_node04_ceph_public_address: 10.166.49.209
- linux:
- network:
- host:
- rgw04:
- address: ${_param:ceph_rgw_node04_address}
- names:
- - ${_param:ceph_rgw_node04_hostname}
- - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
-""" # noqa: E501
-
-add_rgw_ceph_rgw_yml = """
-parameters:
- _param:
- cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
- cluster_node04_address: ${_param:ceph_rgw_node04_address}
- ceph:
- common:
- keyring:
- rgw.xtra:
- caps:
- mon: "allow rw"
- osd: "allow rwx"
- haproxy:
- proxy:
- listen:
- radosgw:
- servers:
- - name: ${_param:cluster_node04_hostname}
- host: ${_param:cluster_node04_address}
- port: ${_param:haproxy_radosgw_source_port}
- params: check
-"""
-
-add_rgw_config_init_yml = """
-parameters:
- reclass:
- storage:
- node:
- ceph_rgw_node04:
- name: ${_param:ceph_rgw_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.ceph.rgw
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:ceph_rgw_system_codename}
- single_address: ${_param:ceph_rgw_node04_address}
- deploy_address: ${_param:ceph_rgw_node04_deploy_address}
- ceph_public_address: ${_param:ceph_rgw_node04_public_address}
- keepalived_vip_priority: 104
-"""
-
-
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+ "wa_prod36167")
class TestCephRgw(object):
+ add_rgw_ceph_init_yml = """
+ parameters:
+ _param:
+ ceph_rgw_node04_hostname: xtra
+ ceph_rgw_node04_address: 10.6.0.205
+ ceph_rgw_node04_ceph_public_address: 10.166.49.209
+ linux:
+ network:
+ host:
+ rgw04:
+ address: ${_param:ceph_rgw_node04_address}
+ names:
+ - ${_param:ceph_rgw_node04_hostname}
+ - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
+ """ # noqa: E501
+
+ add_rgw_ceph_rgw_yml = """
+ parameters:
+ _param:
+ cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
+ cluster_node04_address: ${_param:ceph_rgw_node04_address}
+ ceph:
+ common:
+ keyring:
+ rgw.xtra:
+ caps:
+ mon: "allow rw"
+ osd: "allow rwx"
+ haproxy:
+ proxy:
+ listen:
+ radosgw:
+ servers:
+ - name: ${_param:cluster_node04_hostname}
+ host: ${_param:cluster_node04_address}
+ port: ${_param:haproxy_radosgw_source_port}
+ params: check
+ """
+
@pytest.fixture
def describe_node_in_reclass(self,
reclass_actions, salt_actions):
@@ -428,14 +368,14 @@
"from describe_node_in_reclass fixture")
reclass = reclass_actions
# ---- cluster/*/ceph/init.yml --------------
- reclass.merge_context(yaml_context=add_rgw_ceph_init_yml,
+ reclass.merge_context(yaml_context=self.add_rgw_ceph_init_yml,
short_path="cluster/*/ceph/init.yml")
- reclass.merge_context(yaml_context=add_rgw_ceph_rgw_yml,
+ reclass.merge_context(yaml_context=self.add_rgw_ceph_rgw_yml,
short_path="cluster/*/ceph/rgw.yml")
- reclass.merge_context(yaml_context=add_rgw_config_init_yml,
- short_path="cluster/*/infra/config/init.yml")
+ reclass.merge_context(yaml_context=build_node_config('rgw'),
+ short_path="cluster/*/infra/config/nodes.yml")
salt_actions.run_state("*", "saltutil.refresh_pillar")
@@ -492,10 +432,87 @@
assert job_result == 'SUCCESS', job_description
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
-class TestCephMgr(object):
- def test_add_node(self):
- pass
+def build_node_config(node=''):
+ """
- def test_delete_node(self):
- pass
+ :param node: [osd, mon, rgw, mgr]
+ :return: string in yaml format
+ """
+
+ class _Template(Template):
+ delimiter = "#"
+ idpattern = '[A-Z]*'
+
+ template = _Template("""
+ parameters:
+ reclass:
+ storage:
+ node:
+ ceph_#NODE_node04:
+ name: ${_param:ceph_#NODE_node04_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.ceph.#NODE
+ - environment.${_param:cluster_name}.linux_network_interface
+ - environment.${_param:cluster_name}.overrides
+ params:
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:ceph_#NODE_system_codename}
+ single_address: ${_param:ceph_#NODE_node04_address}
+ #OSDSETTINGS
+ #MONSETTINGS
+ #RGWSETTINGS
+ linux_network_interfaces:
+ br_ctl:
+ address: ${_param:ceph_#NODE_node04_address}
+ enabled: True
+ name_servers:
+ - ${_param:dns_server01}
+ - ${_param:dns_server02}
+ netmask: ${_param:control_network_netmask}
+ proto: static
+ require_interfaces: ['ens4']
+ type: bridge
+ use_interfaces: ['ens4']
+ ens3:
+ enabled: True
+ name: ens3
+ proto: dhcp
+ type: eth
+ ens4:
+ enabled: True
+ ipflush_onchange: True
+ name: ens4
+ proto: manual
+ type: eth
+ """)
+
+ data = {
+ 'NODE': node,
+ 'OSDSETTINGS': '',
+ 'MONSETTINGS': '',
+ 'RGWSETTINGS': ''
+ }
+    # ------------------ OSD-specific settings ------------------
+ if node == 'osd':
+ data['OSDSETTINGS'] = """
+ ceph_crush_parent: rack02
+ """
+    # ------------------ MON-specific settings ------------------
+ if node == 'mon':
+ data['MONSETTINGS'] = """
+ ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
+ ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
+ ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
+ keepalived_vip_priority: 104
+ """ # noqa: E501
+    # ------------------ RGW-specific settings ------------------
+ if node == 'rgw':
+ data['RGWSETTINGS'] = """
+ ceph_public_address: ${_param:ceph_rgw_node04_ceph_public_address}
+ keepalived_vip_priority: 104
+ """ # noqa: E501
+
+ yaml_config = template.substitute(data)
+
+ return yaml_config
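
For reference, a standalone sketch of why the custom delimiter works (it assumes "from string import Template", which this hunk does not show): '#' placeholders are substituted, while reclass ${_param:...} references pass through untouched because '$' is no longer special:

from string import Template

class _T(Template):
    delimiter = "#"
    idpattern = "[A-Z]+"

line = _T("name: ${_param:ceph_#NODE_node04_hostname}")
print(line.substitute({"NODE": "rgw"}))
# -> name: ${_param:ceph_rgw_node04_hostname}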
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
index a89d711..02d7d28 100644
--- a/tcp_tests/tests/system/test_failover_ceph.py
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -13,6 +13,7 @@
# under the License.
import pytest
+import time
from devops.helpers import helpers
from tcp_tests import logger
@@ -33,13 +34,14 @@
'EXTRA_PARAMS': {
'envs': [
"tests_set=-k "
- "'not test_ceph_health and not test_prometheus_alert_count'"
+ "'not salt_master and not test_ceph_health and not "
+ "test_prometheus_alert_count'"
]
}
}
JENKINS_START_TIMEOUT = 60
- JENKINS_BUILD_TIMEOUT = 60 * 15
+ JENKINS_BUILD_TIMEOUT = 60 * 25
def get_ceph_health(self, ssh, node_names):
"""Get Ceph health status on specified nodes
@@ -51,12 +53,36 @@
"""
return {
node_name: ssh.check_call(
- "ceph -s",
+ "ceph health",
node_name=node_name,
raise_on_err=False)['stdout_str']
for node_name in node_names
}
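
Unlike the full "ceph -s" report, "ceph health" prints only the summary status line (HEALTH_OK, or HEALTH_WARN/HEALTH_ERR plus a reason), which keeps the substring check below simple. A toy illustration (hostnames and warning text invented) of the per-node dict this method returns:

health = {
    "cmn01.local": "HEALTH_OK",
    "cmn02.local": "HEALTH_WARN 1 osds down",
}
print(all("HEALTH_OK" in status for status in health.values()))  # False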
+    def wait_healthy_ceph(self,
+                          ssh,
+                          node_names=None,
+                          time_sec=30):
+        """Poll 'ceph health' on the given nodes until every node
+        reports HEALTH_OK or until time_sec expires.
+
+        :return: (status, error); status is True when the cluster is
+            healthy, in which case error is an empty string
+        """
+        ceph_health = ""
+        status = False
+
+        start_time = time.time()
+        while time.time() - start_time < time_sec and not status:
+            ceph_health = self.get_ceph_health(ssh, node_names)
+            status = all("HEALTH_OK" in health
+                         for health in ceph_health.values())
+            if status:
+                break
+            LOG.info("Retrying the Ceph health check because the cluster "
+                     "is not healthy yet: {}".format(ceph_health))
+            time.sleep(10)
+
+        error = ("" if status
+                 else "Ceph health is not OK: {0}".format(ceph_health))
+        return status, error
+
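
A self-contained sketch of the same polling contract (hypothetical helper name, fake transport so it runs without a cluster):

import time

def wait_all_ok(get_health, nodes, time_sec=30, interval=10):
    # minimal standalone version of wait_healthy_ceph's loop
    health = {}
    deadline = time.time() + time_sec
    while time.time() < deadline:
        health = get_health(nodes)
        if all("HEALTH_OK" in h for h in health.values()):
            return True, ""
        time.sleep(interval)
    return False, "Ceph health is not OK: {0}".format(health)

status, error = wait_all_ok(lambda nodes: {n: "HEALTH_OK" for n in nodes},
                            ["osd01", "osd02"], time_sec=5, interval=1)
assert status, error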
@pytest.mark.grab_versions
@pytest.mark.restart_osd_node
def test_restart_osd_node(
@@ -69,11 +95,9 @@
Scenario:
1. Find Ceph OSD nodes
- 2. Check Ceph cluster health before node restart (skipped until
- PROD-31374 is fixed)
+ 2. Check Ceph cluster health before node restart
3. Restart 1 Ceph OSD node
- 4. Check Ceph cluster health after node restart (skipped until
- PROD-31374 is fixed)
+ 4. Check Ceph cluster health after node restart
5. Run Tempest smoke test suite
6. Run test_ceph_status.py::test_ceph_osd and
test_services.py::test_check_services[osd] sanity tests
@@ -93,11 +117,9 @@
# Check Ceph cluster health before node restart
show_step(2)
- ceph_health = self.get_ceph_health(ssh, osd_hosts)
- # FIXME: uncomment the check once PROD-31374 is fixed
- # status = all(
- # ["OK" in status for node, status in ceph_health.items()])
- # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+ result, error = self.wait_healthy_ceph(ssh=ssh,
+ node_names=osd_hosts)
+ assert result, error
# Restart a Ceph OSD node
show_step(3)
@@ -118,11 +140,10 @@
# Check Ceph cluster health after node restart
show_step(4)
- ceph_health = self.get_ceph_health(ssh, osd_hosts) # noqa
- # FIXME: uncomment the check once PROD-31374 is fixed
- # status = all(
- # ["OK" in status for node, status in ceph_health.items()])
- # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+ result, error = self.wait_healthy_ceph(ssh=ssh,
+ node_names=osd_hosts,
+ time_sec=120)
+ assert result, error
# Run Tempest smoke test suite
show_step(5)
@@ -165,11 +186,9 @@
Scenario:
1. Find Ceph CMN nodes
- 2. Check Ceph cluster health before node restart (skipped until
- PROD-31374 is fixed)
+ 2. Check Ceph cluster health before node restart
3. Restart 1 Ceph CMN node
- 4. Check Ceph cluster health after node restart (skipped until
- PROD-31374 is fixed)
+ 4. Check Ceph cluster health after node restart
5. Run Tempest smoke test suite
6. Run test_ceph_status.py::test_ceph_replicas and
test_services.py::test_check_services[cmn] sanity tests
@@ -189,11 +208,9 @@
# Check Ceph cluster health before node restart
show_step(2)
- ceph_health = self.get_ceph_health(ssh, cmn_hosts)
- # FIXME: uncomment the check once PROD-31374 is fixed
- # status = all(
- # ["OK" in status for node, status in ceph_health.items()])
- # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+ result, error = self.wait_healthy_ceph(ssh=ssh,
+ node_names=cmn_hosts)
+ assert result, error
# Restart a Ceph CMN node
show_step(3)
@@ -214,11 +231,10 @@
# Check Ceph cluster health after node restart
show_step(4)
- ceph_health = self.get_ceph_health(ssh, cmn_hosts) # noqa
- # FIXME: uncomment the check once PROD-31374 is fixed
- # status = all(
- # ["OK" in status for node, status in ceph_health.items()])
- # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+ result, error = self.wait_healthy_ceph(ssh=ssh,
+ node_names=cmn_hosts,
+ time_sec=120)
+ assert result, error
# Run Tempest smoke test suite
show_step(5)
@@ -261,11 +277,9 @@
Scenario:
1. Find Ceph RGW nodes
- 2. Check Ceph cluster health before node restart (skipped until
- PROD-31374 is fixed)
+ 2. Check Ceph cluster health before node restart
3. Restart 1 Ceph RGW node
- 4. Check Ceph cluster health after node restart (skipped until
- PROD-31374 is fixed)
+ 4. Check Ceph cluster health after node restart
5. Run Tempest smoke test suite
6. Run test_services.py::test_check_services[rgw] sanity test
@@ -284,11 +298,9 @@
# Check Ceph cluster health before node restart
show_step(2)
- ceph_health = self.get_ceph_health(ssh, rgw_hosts)
- # FIXME: uncomment the check once PROD-31374 is fixed
- # status = all(
- # ["OK" in status for node, status in ceph_health.items()])
- # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+ result, error = self.wait_healthy_ceph(ssh=ssh,
+ node_names=rgw_hosts)
+ assert result, error
# Restart a Ceph RGW node
show_step(3)
@@ -309,11 +321,11 @@
# Check Ceph cluster health after node restart
show_step(4)
- ceph_health = self.get_ceph_health(ssh, rgw_hosts) # noqa
- # FIXME: uncomment the check once PROD-31374 is fixed
- # status = all(
- # ["OK" in status for node, status in ceph_health.items()])
- # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+ result, error = self.wait_healthy_ceph(ssh=ssh,
+ node_names=rgw_hosts,
+ time_sec=120)
+
+ assert result, error
# Run Tempest smoke test suite
show_step(5)
@@ -384,9 +396,9 @@
# STEP #2
show_step(2)
# Get the ceph health output before restart
- health_before = self.get_ceph_health(underlay, osd_node_names)
- assert all(["OK" in p for n, p in health_before.items()]), (
- "'Ceph health is not ok from node: {0}".format(health_before))
+ result, error = self.wait_healthy_ceph(ssh=underlay,
+ node_names=osd_node_names)
+ assert result, error
# STEP #3
show_step(3)
@@ -399,9 +411,10 @@
# STEP #4
show_step(4)
# Get the ceph health output after restart
- health_after = self.get_ceph_health(underlay, osd_node_names)
- assert all(["OK" in p for n, p in health_before.items()]), (
- "'Ceph health is not ok from node: {0}".format(health_after))
+ result, error = self.wait_healthy_ceph(ssh=underlay,
+ node_names=osd_node_names)
+
+ assert result, error
rally.run_container()
@@ -451,9 +464,10 @@
# STEP #2
show_step(2)
# Get the ceph health output before restart
- health_before = self.get_ceph_health(underlay, cmn_node_names)
- assert all(["OK" in p for n, p in health_before.items()]), (
- "'Ceph health is not ok from node: {0}".format(health_before))
+ result, error = self.wait_healthy_ceph(ssh=underlay,
+ node_names=cmn_node_names)
+
+ assert result, error
# STEP #3
show_step(3)
@@ -466,9 +480,11 @@
# STEP #4
show_step(4)
# Get the ceph health output after restart
- health_after = self.get_ceph_health(underlay, cmn_node_names)
- assert all(["OK" in p for n, p in health_before.items()]), (
- "'Ceph health is not ok from node: {0}".format(health_after))
+ result, error = self.wait_healthy_ceph(ssh=underlay,
+ node_names=cmn_node_names,
+ time_sec=120)
+
+ assert result, error
rally.run_container()
@@ -521,9 +537,9 @@
# STEP #2
show_step(2)
# Get the ceph health output before restart
- health_before = self.get_ceph_health(underlay, rgw_node_names)
- assert all(["OK" in p for n, p in health_before.items()]), (
- "'Ceph health is not ok from node: {0}".format(health_before))
+ result, error = self.wait_healthy_ceph(ssh=underlay,
+ node_names=rgw_node_names)
+ assert result, error
# STEP #3
show_step(3)
@@ -536,9 +552,10 @@
# STEP #4
show_step(4)
# Get the ceph health output after restart
- health_after = self.get_ceph_health(underlay, rgw_node_names)
- assert all(["OK" in p for n, p in health_before.items()]), (
- "'Ceph health is not ok from node: {0}".format(health_after))
+ result, error = self.wait_healthy_ceph(ssh=underlay,
+ node_names=rgw_node_names,
+ time_sec=120)
+ assert result, error
rally.run_container()
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 45e3cdd..c6033c1 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -67,14 +67,14 @@
@pytest.fixture(scope='class')
-def dont_switch_to_proposed(request):
- return request.config.getoption("--dont-switch-to-proposed")
+def update_to_tag(request):
+ return request.config.getoption("--update-to-tag")
@pytest.fixture(scope='class')
def switch_to_proposed_pipelines(reclass_actions, salt_actions,
- dont_switch_to_proposed):
- if dont_switch_to_proposed:
+ update_to_tag):
+ if update_to_tag:
return True
reclass = reclass_actions
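
The fixture pair relies on a matching CLI option being registered in conftest.py, which this patch does not show; a sketch of the assumed registration:

def pytest_addoption(parser):
    parser.addoption("--update-to-tag", action="store", default=None,
                     help="update the cluster to this released MCP tag "
                          "instead of release/proposed/2019.2.0")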
@@ -168,6 +168,7 @@
@pytest.mark.usefixtures("switch_to_proposed_pipelines",
+ "update_to_tag",
"wa_for_galera_clustercheck_password_prod35705",
"wa_for_alerta_password_prod35958")
class TestUpdateMcpCluster(object):
@@ -180,7 +181,7 @@
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_mcp_update
def test_update_drivetrain(self, salt_actions, drivetrain_actions,
- show_step, _):
+ show_step, update_to_tag, _):
"""Updating DriveTrain component to release/proposed/2019.2.0 version
Scenario:
@@ -206,7 +207,8 @@
show_step(2)
job_name = 'git-mirror-downstream-mk-pipelines'
job_parameters = {
- 'BRANCHES': 'release/proposed/2019.2.0'
+                # mirror all branches; "'*' or 'release/proposed/2019.2.0'"
+                # always evaluates to '*', so spell the value directly
+                'BRANCHES': '*'
}
job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
@@ -219,7 +221,8 @@
show_step(3)
job_name = 'git-mirror-downstream-pipeline-library'
job_parameters = {
- 'BRANCHES': 'release/proposed/2019.2.0'
+                'BRANCHES': '*'  # see note above: "'*' or ..." is always '*'
}
job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
@@ -233,9 +236,10 @@
job_name = 'upgrade-mcp-release'
job_parameters = {
- 'GIT_REFSPEC': 'release/proposed/2019.2.0',
- 'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
- 'TARGET_MCP_VERSION': '2019.2.0',
+ 'GIT_REFSPEC': update_to_tag or 'release/proposed/2019.2.0',
+ 'MK_PIPELINES_REFSPEC':
+ update_to_tag or 'release/proposed/2019.2.0',
+ 'TARGET_MCP_VERSION': update_to_tag or '2019.2.0',
"DRIVE_TRAIN_PARAMS": {
"OS_DIST_UPGRADE": True,
"OS_UPGRADE": True,
@@ -499,7 +503,7 @@
job_result, job_description = dt.start_job_on_jenkins(
job_name='deploy-upgrade-rabbitmq',
job_parameters=job_parameters,
- build_timeout=40 * 60
+ build_timeout=50 * 60
)
assert job_result == 'SUCCESS', job_description
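
The refspec parameters in the upgrade-mcp-release hunk above all lean on the same "value or default" idiom; in isolation (hypothetical helper name):

def pick_refspec(update_to_tag=None):
    # an unset --update-to-tag arrives as None or '' and falls back
    return update_to_tag or 'release/proposed/2019.2.0'

assert pick_refspec() == 'release/proposed/2019.2.0'
assert pick_refspec('2019.2.17') == '2019.2.17'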
@@ -609,7 +613,7 @@
job_parameters = {
"TARGET_SERVERS": target,
"OS_DIST_UPGRADE": True,
- "UPGRADE_SALTSTACK": False,
+ "UPGRADE_SALTSTACK": True,
"OS_UPGRADE": True,
"INTERACTIVE": False}
job_result, job_description = drivetrain_actions.start_job_on_jenkins(