Merge "Add getting logs from jobs. Related-Prod: PROD-36500"
diff --git a/jobs/global.yaml b/jobs/global.yaml
index 62adb50..d0551a8 100644
--- a/jobs/global.yaml
+++ b/jobs/global.yaml
@@ -4,7 +4,13 @@
       Do not edit this job through the web ! <br>
       Please use jenkins-job-builder in git <br>
       git clone ssh://gerrit.mcp.mirantis.com:29418/mcp/tcp-qa
-    current-version: 2019.2.16
-    previous-version: 2019.2.15
+
+      <h1><font color='red'>
+      <!-- INSERT ALERT HERE vvvvvvv -->
+
+      <!-- INSERT ALERT HERE ^^^^^^^ -->
+      </font></h1>
+    current-version: 2019.2.17
+    previous-version: 2019.2.16
     disabled-proposed: false
     disabled-2019-2-0: true
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index ce9f83f..5b4c174 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -211,18 +211,22 @@
             }
         }
         //run upgrade env to proposed
-        if (env.RUN_UPGRADE_AFTER_JOB == "true") {
+        if (env.RUN_UPGRADE_AFTER_JOB == "true" && currentBuild.result == 'SUCCESS') {
             network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
             upgrade_job = "mcp-update-${env.TEMPEST_IMAGE_VERSION}-${network_backend}-sl"
-              def deploy = build job: "${upgrade_job}",
-                  parameters: [
-                      string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.LAB_CONFIG_NAME}"),
-                      string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
-                      string(name: 'TEMPEST_PATTERN', value: env.TEMPEST_PATTERN),
-                      string(name: 'NODE', value: "openstack_slave_${env.LAB_CONFIG_NAME}")
-                  ],
-                  wait: false,
-                  propagate: false
+            if (env.IPMI_CREDS) {
+                upgrade_job = "mcp-update-bm-b300-queens-ovs-maas"
+            }
+            def deploy = build job: "${upgrade_job}",
+                parameters: [
+                    string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.LAB_CONFIG_NAME}"),
+                    string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
+                    string(name: 'PASSED_STEPS', value: steps),
+                    string(name: 'TEMPEST_TEST_SUITE_NAME', value: env.TEMPEST_TEST_SUITE_NAME),
+                    string(name: 'NODE', value: "openstack_slave_${env.LAB_CONFIG_NAME}")
+                ],
+                wait: false,
+                propagate: false
         }
     } // try
   } // node
diff --git a/jobs/pipelines/run-test-scenarios.groovy b/jobs/pipelines/run-test-scenarios.groovy
index 76d5cb2..c85d9fe 100644
--- a/jobs/pipelines/run-test-scenarios.groovy
+++ b/jobs/pipelines/run-test-scenarios.groovy
@@ -25,6 +25,7 @@
 def steps = env.PASSED_STEPS
 def make_snapshot_stages = false
 env.LAB_CONFIG_NAME = env.LAB_CONFIG_NAME ?: env.ENV_NAME
+env.TEST_PLAN_NAME_PREFIX = '[2019.2.0-update]Upgraded'
 
 timeout(time: 23, unit: 'HOURS') {
     node ("${PARENT_NODE_NAME}") {
@@ -51,35 +52,44 @@
                     currentBuild.result = 'FAILURE'
                 }
             } // stage("Run tests")
-
-            stage("Archive all xml reports") {
-                dir("${env.slave_workdir }") {
-                    archiveArtifacts artifacts: "**/*.xml,**/*.log"
-                    }
+            if (fileExists("jenkins_agent_description.txt")) {
+                def String description  = readFile("jenkins_agent_description.txt")
+                currentBuild.description = "${description}"
             }
-
-            stage("report results to testrail from jenkins master") {
-                if ("${env.REPORT_TO_TESTRAIL}" != "false") {
-                    common.infoMsg("Running on: " + env.PARENT_NODE_NAME, "blue")
-                    shared.verbose_sh("""\
-                           [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv --python=python3.7 /home/jenkins/venv_testrail_reporter""", true, false, true)
-                    shared.run_cmd("""\
-                            . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U""")
-                    shared.swarm_testrail_report(steps, env.PARENT_NODE_NAME)
-                } else {
-                    common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
+            if (currentBuild.result != 'FAILURE') {
+                stage("Archive all xml reports") {
+                    dir("${env.slave_workdir }") {
+                        archiveArtifacts artifacts: "**/*.xml,**/*.log"
+                        }
                 }
-            } // stage("report results to testrail from jenkins master")
-            stage("Store TestRail reports to job description") {
-                if ("${env.REPORT_TO_TESTRAIL}" != "false") {
-                    if (fileExists("description.txt")) {
-                        def String description  = readFile("description.txt")
-                        currentBuild.description += "${description}"
+                stage("report results to testrail from jenkins master") {
+                    if ("${env.REPORT_TO_TESTRAIL}" != "false") {
+                        common.infoMsg("Running on: " + env.PARENT_NODE_NAME, "blue")
+                        shared.verbose_sh("""\
+                               [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv --python=python3.7 /home/jenkins/venv_testrail_reporter""", true, false, true)
+                        shared.run_cmd("""\
+                                . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U""")
+                        shared.swarm_testrail_report(steps, env.PARENT_NODE_NAME)
+                    } else {
+                        common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
                     }
-                } else {
-                    common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
-                }
-            } // stage("Store TestRail reports to job description")
+                } // stage("report results to testrail from jenkins master")
+                stage("Store TestRail reports to job description") {
+                    if ("${env.REPORT_TO_TESTRAIL}" != "false") {
+                        if (fileExists("description.txt")) {
+                            def String description  = readFile("description.txt")
+                            currentBuild.description += "${description}"
+                        }
+                    } else {
+                        common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
+                        println(currentBuild.result)
+                    }
+                } // stage("Store TestRail reports to job description")
+            }//report if success
+            else{
+                common.infoMsg("Upgrade status different from successful. Skipped report steps.")
+                println("Current result is " + currentBuild.result)
+            }
         } // dir
     } // node
 } // timeout
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index e652b72..414cdec 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -284,9 +284,11 @@
                         [ -d /home/jenkins/venv_testrail_analyzer ] || virtualenv --python=python3 /home/jenkins/venv_testrail_analyzer
                     """, true, false, true)
                     shared.run_cmd("""\
-                        . /home/jenkins/venv_testrail_analyzer/bin/activate; pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter@refs/changes/94/514594/3
+                        . /home/jenkins/venv_testrail_analyzer/bin/activate; pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter
                     """)
                     shared.update_working_dir()
+                    // Add info about foundation node for description
+                    writeFile(file: "jenkins_agent_description.txt", text: jenkins_agent_description, encoding: "UTF-8")
                 }
 
                 withCredentials([
diff --git a/jobs/project.yaml b/jobs/project.yaml
index c2e37a7..145ae0d 100644
--- a/jobs/project.yaml
+++ b/jobs/project.yaml
@@ -23,6 +23,7 @@
       - bm-e7-cicd-pike-ovs-maas
       - bm-e7-cicd-pike-odl-maas
       - bm-b300-cicd-queens-ovs-maas
+      - released-bm-b300-cicd-queens-ovs-maas
       - released-bm-pike-ovs
       # --- Released envs ------
       - deploy-released:
diff --git a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
index 66cd438..be346cd 100644
--- a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
@@ -150,7 +150,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -202,7 +202,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
index 687d47d..d8302c2 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
@@ -150,7 +150,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -207,7 +207,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
index 230379f..5e575af 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
@@ -150,7 +150,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -207,7 +207,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml b/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
index d07fc2d..e64a02c 100644
--- a/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
+++ b/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
@@ -138,7 +138,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -206,7 +206,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/bm-cicd-pike-ovs-maas.yml b/jobs/templates/bm-cicd-pike-ovs-maas.yml
index d3dfb38..a3b6e4a 100644
--- a/jobs/templates/bm-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-pike-ovs-maas.yml
@@ -138,7 +138,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/bm-cicd-queens-ovs-maas.yml b/jobs/templates/bm-cicd-queens-ovs-maas.yml
index 2b104f7..73b9196 100644
--- a/jobs/templates/bm-cicd-queens-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-queens-ovs-maas.yml
@@ -138,7 +138,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -206,7 +206,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/bm-e7-cicd-pike-odl-maas.yml b/jobs/templates/bm-e7-cicd-pike-odl-maas.yml
index 1b21f1c..26e463e 100644
--- a/jobs/templates/bm-e7-cicd-pike-odl-maas.yml
+++ b/jobs/templates/bm-e7-cicd-pike-odl-maas.yml
@@ -43,6 +43,11 @@
         name: MCP_VERSION
         trim: 'false'
     - string:
+        default: ''
+        description: ''
+        name: SALT_MODELS_SYSTEM_REF_CHANGE
+        trim: 'false'
+    - string:
         default: sre-team-infra
         description: ''
         name: NODE_NAME
@@ -143,7 +148,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml b/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
index b0feb54..131f7e3 100644
--- a/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
@@ -70,6 +70,11 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
+        default: ''
+        description: ''
+        name: SALT_MODELS_SYSTEM_REF_CHANGE
+        trim: 'false'
+    - string:
         default: refs/heads/release/2019.2.0
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
@@ -138,7 +143,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/deploy-released.yml b/jobs/templates/deploy-released.yml
index 95f2ff5..126b5e0 100644
--- a/jobs/templates/deploy-released.yml
+++ b/jobs/templates/deploy-released.yml
@@ -33,7 +33,7 @@
           name: OS_CREDENTIALS
           trim: 'false'
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: ''
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/deploy_bm.yml b/jobs/templates/deploy_bm.yml
index ebdb885..0e3c102 100644
--- a/jobs/templates/deploy_bm.yml
+++ b/jobs/templates/deploy_bm.yml
@@ -34,7 +34,7 @@
         name: PARENT_NODE_NAME
         trim: 'false'
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: ''
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
index c55e807..576936f 100644
--- a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
@@ -145,7 +145,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
index cae7d08..313f9c6 100644
--- a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
@@ -146,7 +146,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -220,7 +220,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/heat-cicd-pike-dvr-sl.yml b/jobs/templates/heat-cicd-pike-dvr-sl.yml
index 0248d9d..2ade702 100644
--- a/jobs/templates/heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-pike-dvr-sl.yml
@@ -155,7 +155,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/heat-cicd-queens-contrail41-sl.yml b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
index a7e8944..2eca95b 100644
--- a/jobs/templates/heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
@@ -148,7 +148,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/heat-cicd-queens-dvr-sl.yml b/jobs/templates/heat-cicd-queens-dvr-sl.yml
index 3ba3d08..bcaaa84 100644
--- a/jobs/templates/heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-queens-dvr-sl.yml
@@ -149,7 +149,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -206,7 +206,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/released-bm-b300-cicd-queens-ovs-maas.yml b/jobs/templates/released-bm-b300-cicd-queens-ovs-maas.yml
new file mode 100644
index 0000000..ea418dc
--- /dev/null
+++ b/jobs/templates/released-bm-b300-cicd-queens-ovs-maas.yml
@@ -0,0 +1,226 @@
+- job-template:
+    project-type: pipeline
+    description: '{job-description}'
+    concurrent: true
+    disabled: false
+    name: released-bm-b300-cicd-queens-ovs-maas
+    parameters:
+    - string:
+        default: released-bm-b300-cicd-queens-ovs-maas
+        description: ''
+        name: LAB_CONFIG_NAME
+        trim: 'false'
+    - string:
+        default: core,kvm,cicd
+        description: Comma-separated list of stacks to deploy the drivetrain (salt
+          cluster and cicd nodes)
+        name: DRIVETRAIN_STACK_INSTALL
+        trim: 'false'
+    - string:
+        default: '24000'
+        description: ''
+        name: DRIVETRAIN_STACK_INSTALL_TIMEOUT
+        trim: 'false'
+    - string:
+        default: openstack,ovs,ceph,stacklight
+        description: Comma-separated list of stacks to deploy the target platform
+          (openstack and additional components)
+        name: PLATFORM_STACK_INSTALL
+        trim: 'false'
+    - string:
+        default: '24000'
+        description: ''
+        name: PLATFORM_STACK_INSTALL_TIMEOUT
+        trim: 'false'
+    - string:
+        default: "{previous-version}"
+        description: ''
+        name: MCP_VERSION
+        trim: 'false'
+    - string:
+        default: sre-team-infra
+        description: ''
+        name: NODE_NAME
+        trim: 'false'
+    - string:
+        default: /home/jenkins/images/ubuntu-16-04-x64-mcp2019.2.0.qcow2
+        description: ''
+        name: MCP_IMAGE_PATH1604
+        trim: 'false'
+    - string:
+        default: /home/jenkins/images/cfg01-day01.qcow2
+        description: ''
+        name: IMAGE_PATH_CFG01_DAY01
+        trim: 'false'
+    - string:
+        default: cfg01.${{LAB_CONFIG_NAME}}-config-drive.iso
+        description: ISO name that will be generated and downloaded to the /home/jenkins/images/
+        name: CFG01_CONFIG_IMAGE_NAME
+        trim: 'false'
+    - string:
+        default: released-bm-b300-cicd-queens-ovs-maas
+        description: ''
+        name: ENV_NAME
+        trim: 'false'
+    - string:
+        default: ''
+        description: |-
+          Example: refs/changes/89/411189/36
+          (for now - only one reference allowed)
+        name: TCP_QA_REFS
+        trim: 'false'
+    - string:
+        default: refs/tags/{previous-version}
+        description: reference to patchset in pipeline-library
+        name: PIPELINE_LIBRARY_REF
+        trim: 'false'
+    - string:
+        default: refs/tags/{previous-version}
+        description: reference to patchset in mk-pipelines
+        name: MK_PIPELINES_REF
+        trim: 'false'
+    - string:
+        default: refs/tags/{previous-version}
+        description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
+          value
+        name: COOKIECUTTER_TEMPLATE_COMMIT
+        trim: 'false'
+    - string:
+        default: refs/tags/{previous-version}
+        description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
+          value
+        name: SALT_MODELS_SYSTEM_COMMIT
+        trim: 'false'
+    - string:
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
+        description: |-
+          Pytest option -k or -m, with expression to select necessary tests.
+          Additional pytest options are allowed.
+        name: RUN_TEST_OPTS
+        trim: 'false'
+    - bool:
+        default: true
+        description: ''
+        name: SHUTDOWN_ENV_ON_TEARDOWN
+    - string:
+        default: ''
+        description: ''
+        name: COOKIECUTTER_REF_CHANGE
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: ENVIRONMENT_TEMPLATE_REF_CHANGE
+        trim: 'false'
+    - string:
+        default: '[MCP1.1_QUEENS]Tempest'
+        description: ''
+        name: TEMPEST_TEST_SUITE_NAME
+        trim: 'false'
+    - string:
+        default: queens
+        description: ''
+        name: TEMPEST_IMAGE_VERSION
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: UPDATE_REPO_CUSTOM_TAG
+        trim: 'false'
+    - bool:
+        default: true
+        description: If set, reports will be created in TestRail for this build
+        name: REPORT_TO_TESTRAIL
+    - choice:
+        choices:
+        - heat
+        - devops
+        description: ''
+        name: ENV_MANAGER
+    - string:
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
+        description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
+        name: OS_AUTH_URL
+        trim: 'false'
+    - string:
+        default: sre-team
+        description: OpenStack project (tenant) name
+        name: OS_PROJECT_NAME
+        trim: 'false'
+    - string:
+        default: default
+        description: OpenStack user domain name
+        name: OS_USER_DOMAIN_NAME
+        trim: 'false'
+    - string:
+        default: sre-qa-ci-eu
+        description: Jenkins credentials ID with username and password to create a
+          heat stack in OpenStack
+        name: OS_CREDENTIALS
+        trim: 'false'
+    - string:
+        default: eu-cloud-low-flavors.env
+        description: |-
+          Heat template environment filename with 'parameter_defaults' dict, placed in tcp_tests/templates/_heat_environments/
+          , for example: microcloud-8116.env
+        name: LAB_PARAM_DEFAULTS
+        trim: 'false'
+    - string:
+        default: refs/tags/{previous-version}
+        description: ''
+        name: JENKINS_PIPELINE_BRANCH
+        trim: 'false'
+    - string:
+        default: refs/tags/{previous-version}
+        description: ''
+        name: MCP_COMMON_SCRIPTS_REFS
+        trim: 'false'
+    - string:
+        default: proposed
+        description: "{previous-version}"
+        name: UPDATE_VERSION
+        trim: 'false'
+    - string:
+        name: IPMI_CREDS
+        default: 'lab_engineer'
+    - string:
+        default: ''
+        description: ''
+        name: TEMPEST_EXTRA_ARGS
+        trim: 'false'
+    - password:
+        name: CISCO_PASS
+        default: ''  # SECURITY: never commit a real password; supply via the Jenkins credentials store at runtime
+    - string:
+        default: ''
+        description: ''
+        name: SALT_MODELS_SYSTEM_REF_CHANGE
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: BATCH_SIZE
+        trim: 'false'
+    - bool:
+        default: false
+        description: Whether to perform dist-upgrade on virtual nodes during deployment
+        name: DIST_UPGRADE_NODES
+    - bool:
+        default: true
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
+    - bool:
+        name: RUN_UPGRADE_AFTER_JOB
+        default: true
+        description: "Upgrade and test after deploy?"
+    pipeline-scm:
+      lightweight-checkout: false
+      scm:
+      - git:
+          branches:
+          - FETCH_HEAD
+          refspec: ${{TCP_QA_REFS}}
+          url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa
+      script-path: jobs/pipelines/deploy-cicd-and-run-tests.groovy
+    logrotate:
+      daysToKeep: 365
diff --git a/jobs/templates/released-bm-pike-ovs.yml b/jobs/templates/released-bm-pike-ovs.yml
index 88cbdef..ceb8563 100644
--- a/jobs/templates/released-bm-pike-ovs.yml
+++ b/jobs/templates/released-bm-pike-ovs.yml
@@ -133,7 +133,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
@@ -186,7 +186,7 @@
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
     - bool:
-        default: false
+        default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
     pipeline-scm:
diff --git a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
index f0670f0..0e00a79 100644
--- a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
@@ -153,7 +153,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
index 9273793..da9c52a 100644
--- a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
@@ -153,7 +153,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
index 4ddd929..89f5d39 100644
--- a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
@@ -136,7 +136,7 @@
         name: TEMPEST_TARGET
         trim: 'false'
     - string:
-        default: proposed
+        default: ''
         description: ''
         name: UPDATE_REPO_CUSTOM_TAG
         trim: 'false'
@@ -151,7 +151,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
index 01921e0..8d845b6 100644
--- a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
@@ -153,7 +153,7 @@
         description: ''
         name: ENV_MANAGER
     - string:
-        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        default: https://keystone.ic-eu.ssl.mirantis.net/v3
         description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
         name: OS_AUTH_URL
         trim: 'false'
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index fb90131..71e3643 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -63,12 +63,16 @@
     test_scenario:
 
       - backup-saltmaster:
-          run-test-opts: '--maxfail=1 -k TestBackupRestoreMaster and not maas'
+          run-test-opts: |-
+            tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_salt_master_manual_backup_restore_pipeline \
+            tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_salt_master_manual_backup_restore
           deployment: heat-cicd-queens-dvr-sl
           display-name: Backup/Restore SaltMaster
 
       - backup-saltmaster-with-maas:
-         run-test-opts: '--maxfail=1 -k TestBackupRestoreMaster and maas'
+         run-test-opts: |-
+           tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_maas_manual_backup_restore_pipeline \
+           tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_maas_backup_restore_manual
          deployment: bm-cicd-queens-ovs-maas
          display-name: Backup/Restore SaltMaster (with MAAS)
 
@@ -103,14 +107,14 @@
           run-test-opts: '--maxfail=1 -k TestCephMon'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Add/Remove CMN node
-      - ceph_mgr-queens-dvr-sl:
-          run-test-opts: '--maxfail=1 -k TestCephMgr'
-          deployment: heat-cicd-queens-dvr-sl
-          display-name: Add/Remove MGR node
       - ceph_rgw-queens-dvr-sl:
           run-test-opts: '--maxfail=1 -k TestCephRgw'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Add/Remove RGW node
+      - ceph_failover:
+          run-test-opts: '--maxfail=1 -k TestFailoverCeph'
+          deployment: heat-cicd-queens-dvr-sl
+          display-name: Failover tests for Ceph nodes
     jobs:
       - '{test_scenario}'
     logrotate:
@@ -146,36 +150,49 @@
     test-pattern-with-contrail:
       ^heat_tempest_plugin.tests*|^tempest.api.image*|^tempest_horizon*|^tempest.api.identity*|^tempest.api.network*|^tempest.api.compute*|^tempest.api.volume*|^tempest.scenario*|^tempest.api.object_storage*
     test_scenario:
+      - mcp-update-bm-b300-queens-ovs-maas:
+          deployment: released-bm-b300-cicd-queens-ovs-maas
+          run-test-opts: '{test-opt}'
+          tempest_pattern: 'tempest'
+          display-name: MCP update (BM-B300-queens + MAAS)
+          tempest_test_suite: "[MCP1.1_QUEENS]Tempest"
+          passed_steps: "hardware,create_model,salt,core,kvm,cicd,openstack,ovs,ceph,stacklight"
+
       - mcp-update-pike-dvr-sl:
           deployment: released-heat-cicd-pike-dvr-sl
-          disabled: true
           run-test-opts: '{test-opt}'
           tempest_pattern: 'tempest'
           display-name: MCP update (pike)
+          tempest_test_suite: "[MCP1.1_PIKE]Tempest"
+          passed_steps: "hardware,create_model,salt,core,cicd,openstack,ovs,ceph,stacklight"
 
       - mcp-update-queens-dvr-sl:
           deployment: released-heat-cicd-queens-dvr-sl
-          disabled: true
           tempest_pattern: 'tempest'
           run-test-opts: '{test-opt}'
           display-name: MCP update (queens)
+          tempest_test_suite: "[MCP1.1_QUEENS]Tempest"
+          passed_steps: "hardware,create_model,salt,core,cicd,openstack,ovs,ceph,stacklight"
 
       - mcp-update-pike-contrail-sl:
           deployment: released-heat-cicd-pike-contrail41-sl
-          disabled: true
           tempest_pattern: '{test-pattern-with-contrail}'
           run-test-opts: '{test-opt-with-contrail}'
           display-name: MCP update (pike + OC)
+          tempest_test_suite: "[MCP1.1_PIKE]Tempest"
+          passed_steps: "hardware,create_model,salt,core,cicd,openstack,contrail,ceph,stacklight"
 
       - mcp-update-queens-contrail-sl:
           deployment: released-heat-cicd-queens-contrail41-sl
-          disabled: true
           tempest_pattern: '{test-pattern-with-contrail}'
           run-test-opts: '{test-opt-with-contrail}'
           display-name: MCP update (queens + OC)
+          tempest_test_suite: "[MCP1.1_QUEENS]Tempest"
+          passed_steps: "hardware,create_model,salt,core,cicd,openstack,contrail,ceph,stacklight"
 
       - os-update-pike-to-queens:
           deployment: heat-cicd-pike-dvr-sl
+          disabled: true
           run-test-opts: '-k TestUpdatePikeToQueens'
           display-name: Update Pike -> Queens
 
@@ -253,7 +270,7 @@
         name: MCP_VERSION
         trim: 'false'
     - string:
-        default: ''
+        default: '{passed_steps|}'
         description: 'Completed steps to install components on the environment.
                       If tests require some additional components, it may be installed in
                       appropriate fixtures, so set the PASSED_STEPS correctly for the
@@ -261,6 +278,12 @@
         name: PASSED_STEPS
         trim: 'false'
     - string:
+        default: '{tempest_test_suite|}'
+        description: '[MCP1.1_xxx]Tempest where xxx - MCP version
+                      using only for report to testrail'
+        name: TEMPEST_TEST_SUITE_NAME
+        trim: 'false'
+    - string:
         default: ''
         description: 'Example: refs/changes/89/411189/36
                        (for now - only one reference allowed)'
@@ -274,7 +297,7 @@
         name: RUN_TEST_OPTS
         trim: 'false'
     - text:
-        default: '{tempest_pattern}'
+        default: '{tempest_pattern|}'
         description: |-
           Examples: 'set=full','set=smoke' or test name.
         name: TEMPEST_PATTERN
diff --git a/jobs/view.yaml b/jobs/view.yaml
index c922ad9..a6caaf2 100644
--- a/jobs/view.yaml
+++ b/jobs/view.yaml
@@ -89,13 +89,17 @@
     filter-executors: true
     filter-queue: true
     job-name:
+      - deploy_bm
       - bm-cicd-pike-ovs-maas
       - bm-cicd-queens-ovs-maas
-      - deploy_bm
       - heat-bm-cicd-pike-contrail-sl
       - heat-bm-cicd-queens-contrail-sl
       - released-bm-pike-ovs
       - show_networks_used_by_libvirt
+      - bm-e7-cicd-pike-ovs-maas
+      - bm-e7-cicd-pike-odl-maas
+      - bm-b300-cicd-queens-ovs-maas
+      - released-bm-b300-cicd-queens-ovs-maas
     columns:
       - status
       - weather
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 73a106a..8390c1b 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -289,6 +289,7 @@
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
                 string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+                string(name: 'SALT_MODELS_SYSTEM_REF_CHANGE', value: env.SALT_MODELS_SYSTEM_REF_CHANGE ?: ''),
                 string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecutter_template_commit}"),
                 string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${salt_models_system_commit}"),
                 string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
@@ -445,6 +446,7 @@
                 string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
                 string(name: 'REPOSITORY_SUITE', value: "${env.MCP_VERSION}"),
                 string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${saltmodels_system_commit}"),
+                string(name: 'SALT_MODELS_SYSTEM_REF_CHANGE', value: env.SALT_MODELS_SYSTEM_REF_CHANGE ?: ''),
                 string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecuttertemplate_commit}"),
                 string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
                 string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
@@ -498,6 +500,7 @@
                 string(name: 'DEPLOY_NETWORK_NETMASK', value: "${deploy_network_mask}"),
                 string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
                 string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+                string(name: 'SALT_MODELS_SYSTEM_REF_CHANGE', value: env.SALT_MODELS_SYSTEM_REF_CHANGE ?: ''),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 string(name: 'UPDATE_VERSION', value: "${update_version}"),
                 string(name: 'MCP_COMMON_SCRIPTS_REFS', value: "${mcp_common_scripts_ref}"),
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index c2e82d4..105c34d 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -286,6 +286,7 @@
         :rtype: list stdout
         :raises: devops.error.DevopsCalledProcessError
         """
+        LOG.info("Executing {} on {}".format(cmd, node_name))
         remote = self.remote(node_name=node_name, host=host,
                              address_pool=address_pool)
         return remote.check_call(
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 194b275..43c9f0c 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,9 +1,8 @@
-# git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre]   # Use this requirement for PostgreSQL
 mock>=1.2,<4.0.0 # pinning first to avoid dependency meat grinder below
 libvirt-python>=3.5.0,<4.1.0  # LGPLv2+
-git+git://github.com/openstack/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
-git+git://github.com/dis-xcom/fuel-devops-driver-ironic
-paramiko
+git+https://github.com/openstack-archive/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+git+https://github.com/dis-xcom/fuel-devops-driver-ironic
+paramiko<=2.8.0
 six
 requests>=2.2.0
 oslo.config>=6.2.1,<6.6.0 # Apache-2.0
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env b/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
index 0d0c859..abbe367 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
@@ -21,12 +21,12 @@
   cid_flavor: 8cpu16ram50stor0epf
   ntw_flavor: 8cpu16ram50stor0epf
   nal_flavor: 8cpu16ram50stor0epf
-  dbs_flavor: 8cpu16ram50stor0epf
-  msg_flavor: 8cpu16ram50stor0epf
-  mon_flavor: 8cpu16ram50stor0epf
+  dbs_flavor: 4cpu8ram40stor0epf
+  msg_flavor: 4cpu4ram50stor0epf
+  mon_flavor: 4cpu4ram50stor0epf
   mdb_flavor: 8cpu16ram50stor0epf
   log_flavor: 8cpu16ram50stor0epf
-  mtr_flavor: 8cpu16ram50stor0epf
+  mtr_flavor: 4cpu4ram50stor0epf
   cmp_flavor: 4cpu8ram100stor10epf
   cmn_flavor: 4cpu8ram40stor0epf
   rgw_flavor: 4cpu8ram40stor0epf
@@ -40,7 +40,7 @@
   vsrx_flavor: oc_vsrx
 
   key_pair: system_key_8133
-  bm_availability_zone: nova
+  bm_availability_zone: vlan-provider-net-az
   vm_availability_zone: nova
   net_public: public
 
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
index 7b77db1..d5e0e5a 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -44,7 +44,7 @@
   vsrx_flavor: oc_vsrx
 
   key_pair: system_key_8133
-  bm_availability_zone: nova
+  bm_availability_zone: vlan-provider-net-az
   vm_availability_zone: nova
   net_public: public
 
diff --git a/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env b/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env
index ad4290e..1af6209 100644
--- a/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env
+++ b/tcp_tests/templates/_heat_environments/us-cloud-low-flavors.env
@@ -21,12 +21,12 @@
   cid_flavor: 8cpu16ram50stor0epf
   ntw_flavor: 8cpu16ram50stor0epf
   nal_flavor: 8cpu16ram50stor0epf
-  dbs_flavor: 8cpu16ram50stor0epf
-  msg_flavor: 8cpu16ram50stor0epf
-  mon_flavor: 8cpu16ram50stor0epf
+  dbs_flavor: 4cpu8ram40stor0epf
+  msg_flavor: 4cpu4ram50stor0epf
+  mon_flavor: 4cpu4ram50stor0epf
   mdb_flavor: 8cpu16ram50stor0epf
   log_flavor: 8cpu16ram50stor0epf
-  mtr_flavor: 8cpu16ram50stor0epf
+  mtr_flavor: 4cpu4ram50stor0epf
   cmp_flavor: 4cpu8ram100stor10epf
   cmn_flavor: 4cpu8ram40stor0epf
   rgw_flavor: 4cpu8ram40stor0epf
diff --git a/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh b/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
index 1e0d43e..70cd37f 100644
--- a/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
+++ b/tcp_tests/templates/_packer/scripts/jenkins_virtualenvs.sh
@@ -35,7 +35,7 @@
 . ${TESTMARKER_VENV_PATH}/bin/activate
 #pip install git+https://github.com/ibumarskov/testrail-reporter -U
 # Pull from review to test changes in testrail-reporter before their merging
-pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter@refs/changes/94/514594/3
+pip install git+https://review.gerrithub.io/ibumarskov/testrail-reporter@refs/changes/94/514594/8
 deactivate
 
 if [ "$CHANGE_RIGHTS" = true ]; then
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
index f2a9ef6..da17023 100644
--- a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
@@ -80,7 +80,7 @@
     properties:
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
-      cfg01_flavor: { get_param: cfg_flavor }
+      cfg01_flavor: re.jenkins.slave.large
       availability_zone: { get_param: bm_availability_zone }
       management_net: 'system-phys-2401'
       control_net: 'system-phys-2404'
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 04adcc4..2499b71 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -21,9 +21,9 @@
   control_vlan: '2404'
 
   jenkins_pipelines_branch: 'release/2019.2.0'
-  deploy_network_gateway: 172.16.164.1
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.164.0/26
+  deploy_network_gateway: 172.16.180.1
+  deploy_network_netmask: 255.255.254.0
+  deploy_network_subnet: 172.16.180.0/23
   deployment_type: physical
   dns_server01: 172.18.176.6
   dns_server02: 172.18.224.6
@@ -32,22 +32,22 @@
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.11.241
-  infra_kvm01_deploy_address: 172.16.164.3
+  infra_kvm01_deploy_address: 172.16.180.3
   infra_kvm01_hostname: kvm01
   infra_kvm02_control_address: 10.167.11.242
-  infra_kvm02_deploy_address: 172.16.164.4
+  infra_kvm02_deploy_address: 172.16.180.4
   infra_kvm02_hostname: kvm02
   infra_kvm03_control_address: 10.167.11.243
-  infra_kvm03_deploy_address: 172.16.164.5
+  infra_kvm03_deploy_address: 172.16.180.5
   infra_kvm03_hostname: kvm03
   infra_kvm04_control_address: 10.167.11.244
-  infra_kvm04_deploy_address: 172.16.164.6
+  infra_kvm04_deploy_address: 172.16.180.6
   infra_kvm04_hostname: kvm04
   infra_kvm05_control_address: 10.167.11.245
-  infra_kvm05_deploy_address: 172.16.164.7
+  infra_kvm05_deploy_address: 172.16.180.7
   infra_kvm05_hostname: kvm05
   infra_kvm06_control_address: 10.167.11.246
-  infra_kvm06_deploy_address: 172.16.164.8
+  infra_kvm06_deploy_address: 172.16.180.8
   infra_kvm06_hostname: kvm06
   infra_kvm_vip_address: 10.167.11.240
   infra_primary_first_nic: eth1
@@ -55,11 +55,11 @@
   kubernetes_enabled: 'False'
   local_repositories: 'False'
   maas_enabled: 'True'
-  maas_deploy_address: 172.16.164.2
-  maas_deploy_cidr: 172.16.164.0/26
-  maas_deploy_gateway: 172.16.164.1
-  maas_deploy_range_end: 172.16.164.61
-  maas_deploy_range_start: 172.16.164.18
+  maas_deploy_address: 172.16.180.2
+  maas_deploy_cidr: 172.16.180.0/23
+  maas_deploy_gateway: 172.16.180.1
+  maas_deploy_range_end: 172.16.180.61
+  maas_deploy_range_start: 172.16.180.18
   maas_dhcp_enabled: 'True'
   maas_fabric_name: fabric-0
   maas_hostname: cfg01
@@ -74,7 +74,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -111,7 +111,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -148,7 +148,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -186,7 +186,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -223,7 +223,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -260,7 +260,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -297,7 +297,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:84"
               mode: "static"
-              ip: "172.16.164.9"
+              ip: "172.16.180.9"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -334,7 +334,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:82"
               mode: "static"
-              ip: "172.16.164.10"
+              ip: "172.16.180.10"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -371,7 +371,7 @@
             one1:
               mac: "0c:c4:7a:6c:bc:f6"
               mode: "static"
-              ip: "172.16.164.11"
+              ip: "172.16.180.11"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -408,7 +408,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -445,7 +445,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -482,7 +482,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -519,7 +519,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -556,7 +556,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -597,7 +597,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -639,7 +639,7 @@
   openstack_compute_count: '3'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
-  openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+  openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
   openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
   openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
   openstack_control_address: 10.167.11.10
@@ -659,9 +659,9 @@
   openstack_database_node03_address: 10.167.11.53
   openstack_database_node03_hostname: dbs03
   openstack_enabled: 'True'
-  openstack_gateway_node01_deploy_address: 172.16.164.9
-  openstack_gateway_node02_deploy_address: 172.16.164.10
-  openstack_gateway_node03_deploy_address: 172.16.164.11
+  openstack_gateway_node01_deploy_address: 172.16.180.9
+  openstack_gateway_node02_deploy_address: 172.16.180.10
+  openstack_gateway_node03_deploy_address: 172.16.180.11
   openstack_gateway_node01_address: 10.167.11.224
   openstack_gateway_node01_hostname: gtw01
   openstack_gateway_node02_hostname: gtw02
@@ -708,7 +708,7 @@
   salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
   salt_master_address: 10.167.11.5
   salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.164.2
+  salt_master_management_address: 172.16.180.2
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.11.60
   stacklight_log_hostname: log
@@ -756,7 +756,7 @@
   ceph_public_network_allocation: storage
   ceph_cluster_network: "10.167.11.0/24"
   ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
-  ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+  ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
   ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
   ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
   ceph_osd_data_disks: "/dev/sdb"
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
index c9fd9d6..617b1fa 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
@@ -68,7 +68,7 @@
       auto ens3
       iface ens3 inet static
       address $management_static_ip
-      netmask 255.255.255.192
+      netmask 255.255.254.0
       gateway $management_gw
       dns-nameservers $dnsaddress
 
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
index 0d37b6f..f40da29 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
@@ -33,19 +33,19 @@
     default: "172.17.42.0/26"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.167.11.5
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 62e2064..81d3bf7 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -36,9 +36,9 @@
   control_vlan: '2404'
 
   jenkins_pipelines_branch: 'release/2019.2.0'
-  deploy_network_gateway: 172.16.164.1
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.164.0/26
+  deploy_network_gateway: 172.16.180.1
+  deploy_network_netmask: 255.255.254.0
+  deploy_network_subnet: 172.16.180.0/23
   deployment_type: physical
   dns_server01: 172.18.176.6
   dns_server02: 172.18.224.6
@@ -47,22 +47,22 @@
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.11.241
-  infra_kvm01_deploy_address: 172.16.164.3
+  infra_kvm01_deploy_address: 172.16.180.3
   infra_kvm01_hostname: kvm01
   infra_kvm02_control_address: 10.167.11.242
-  infra_kvm02_deploy_address: 172.16.164.4
+  infra_kvm02_deploy_address: 172.16.180.4
   infra_kvm02_hostname: kvm02
   infra_kvm03_control_address: 10.167.11.243
-  infra_kvm03_deploy_address: 172.16.164.5
+  infra_kvm03_deploy_address: 172.16.180.5
   infra_kvm03_hostname: kvm03
   infra_kvm04_control_address: 10.167.11.244
-  infra_kvm04_deploy_address: 172.16.164.6
+  infra_kvm04_deploy_address: 172.16.180.6
   infra_kvm04_hostname: kvm04
   infra_kvm05_control_address: 10.167.11.245
-  infra_kvm05_deploy_address: 172.16.164.7
+  infra_kvm05_deploy_address: 172.16.180.7
   infra_kvm05_hostname: kvm05
   infra_kvm06_control_address: 10.167.11.246
-  infra_kvm06_deploy_address: 172.16.164.8
+  infra_kvm06_deploy_address: 172.16.180.8
   infra_kvm06_hostname: kvm06
   infra_kvm_vip_address: 10.167.11.240
   infra_primary_first_nic: eth1
@@ -70,11 +70,11 @@
   kubernetes_enabled: 'False'
   local_repositories: 'False'
   maas_enabled: 'True'
-  maas_deploy_address: 172.16.164.2
-  maas_deploy_cidr: 172.16.164.0/26
-  maas_deploy_gateway: 172.16.164.1
-  maas_deploy_range_end: 172.16.164.62
-  maas_deploy_range_start: 172.16.164.18
+  maas_deploy_address: 172.16.180.2
+  maas_deploy_cidr: 172.16.180.0/23
+  maas_deploy_gateway: 172.16.180.1
+  maas_deploy_range_end: 172.16.180.62
+  maas_deploy_range_start: 172.16.180.18
   maas_dhcp_enabled: 'True'
   maas_fabric_name: fabric-0
   maas_hostname: cfg01
@@ -89,7 +89,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -126,7 +126,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -163,7 +163,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -201,7 +201,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -238,7 +238,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -275,7 +275,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -312,7 +312,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:84"
               mode: "static"
-              ip: "172.16.164.9"
+              ip: "172.16.180.9"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -349,7 +349,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:82"
               mode: "static"
-              ip: "172.16.164.10"
+              ip: "172.16.180.10"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -386,7 +386,7 @@
             one1:
               mac: "0c:c4:7a:6c:bc:f6"
               mode: "static"
-              ip: "172.16.164.11"
+              ip: "172.16.180.11"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -423,7 +423,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -460,7 +460,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -497,7 +497,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -534,7 +534,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -571,7 +571,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -612,7 +612,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -654,7 +654,7 @@
   openstack_compute_count: '3'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
-  openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+  openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
   openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
   openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
   openstack_control_address: 10.167.11.10
@@ -674,9 +674,9 @@
   openstack_database_node03_address: 10.167.11.53
   openstack_database_node03_hostname: dbs03
   openstack_enabled: 'True'
-  openstack_gateway_node01_deploy_address: 172.16.164.9
-  openstack_gateway_node02_deploy_address: 172.16.164.10
-  openstack_gateway_node03_deploy_address: 172.16.164.11
+  openstack_gateway_node01_deploy_address: 172.16.180.9
+  openstack_gateway_node02_deploy_address: 172.16.180.10
+  openstack_gateway_node03_deploy_address: 172.16.180.11
   openstack_gateway_node01_address: 10.167.11.224
   openstack_gateway_node01_hostname: gtw01
   openstack_gateway_node02_hostname: gtw02
@@ -722,7 +722,7 @@
   salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
   salt_master_address: 10.167.11.5
   salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.164.2
+  salt_master_management_address: 172.16.180.2
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.11.60
   stacklight_log_hostname: log
@@ -779,7 +779,7 @@
   ceph_public_network_allocation: storage
   ceph_cluster_network: "10.167.11.0/24"
   ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
-  ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+  ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
   ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
   ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
 
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
index c9fd9d6..617b1fa 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
@@ -68,7 +68,7 @@
       auto ens3
       iface ens3 inet static
       address $management_static_ip
-      netmask 255.255.255.192
+      netmask 255.255.254.0
       gateway $management_gw
       dns-nameservers $dnsaddress
 
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
index eac31bf..4306ae5 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
@@ -33,19 +33,19 @@
     default: "172.17.42.0/26"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.167.11.5
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
index 3458dbc..abf1834 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
@@ -305,6 +305,7 @@
   openstack_message_queue_node03_address: 10.167.11.43
   openstack_message_queue_node03_hostname: msg03
   openstack_network_engine: ovs
+  openstack_ovs_dvr_enabled: 'False'
   openstack_neutron_qos: 'True'
   openstack_neutron_vlan_aware_vms: 'True'
   openstack_nfv_dpdk_enabled: 'False'
@@ -316,7 +317,6 @@
   openstack_nfv_sriov_pf_nic: enp5s0f1
   openstack_nova_cpu_pinning: 6,7,8,9,10,11
   openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_ovs_dvr_enabled: 'True'
   openstack_ovs_encapsulation_type: vxlan
   openstack_ovs_encapsulation_vlan_range: 2402:2406
   openstack_proxy_address: 10.167.11.80
@@ -360,6 +360,8 @@
   stacklight_telemetry_node02_hostname: mtr02
   stacklight_telemetry_node03_address: 10.167.11.99
   stacklight_telemetry_node03_hostname: mtr03
+  opendaylight_control_node01_address: 10.167.11.220
+  opendaylight_control_node01_hostname: odl01
   static_ips_on_deploy_network_enabled: 'False'
   tenant_network_gateway: 10.167.13.1
   tenant_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml
index 5717e2e..b2e6985 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-vcp-environment.yaml
@@ -339,3 +339,14 @@
           role: single_dhcp
         ens3:
           role: single_ctl
+
+    odl01.bm-e7-cicd-pike-odl-maas.local:
+      reclass_storage_name: opendaylight_control_node01
+      roles:
+      - opendaylight_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 28d5916..026932b 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -190,7 +190,7 @@
                 devices:
                   - sdb
                 volume:
-                  cinder-volumes-pool:
+                  cinder-vg-pool:
                     size: 800G
           power_parameters:
             power_address: "5.43.227.11"
@@ -243,7 +243,7 @@
                 devices:
                   - sdb
                 volume:
-                  cinder-volumes-pool:
+                  cinder-vg-pool:
                     size: 800G
           power_parameters:
             power_address: "5.43.227.19"
@@ -263,10 +263,10 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.16
+  openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.254
   openstack_compute_deploy_address_ranges: 172.16.162.73-172.16.162.74
-  openstack_compute_tenant_address_ranges: 10.167.13.15-10.167.13.16
-  openstack_compute_backend_address_ranges: 10.167.13.15-10.167.13.16
+  openstack_compute_tenant_address_ranges: 10.167.13.15-10.167.13.254
+  openstack_compute_backend_address_ranges: 10.167.13.15-10.167.13.254
   openstack_control_address: 10.167.11.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.11.11
@@ -305,6 +305,7 @@
   openstack_message_queue_node03_address: 10.167.11.43
   openstack_message_queue_node03_hostname: msg03
   openstack_network_engine: ovs
+  openstack_ovs_dvr_enabled: 'False'
   openstack_neutron_qos: 'True'
   openstack_neutron_vlan_aware_vms: 'True'
   openstack_nfv_dpdk_enabled: 'False'
@@ -316,7 +317,6 @@
   openstack_nfv_sriov_pf_nic: enp5s0f1
   openstack_nova_cpu_pinning: 6,7,8,9,10,11
   openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_ovs_dvr_enabled: 'True'
   openstack_ovs_encapsulation_type: vxlan
   openstack_ovs_encapsulation_vlan_range: 2402:2406
   openstack_proxy_address: 10.167.11.80
diff --git a/tcp_tests/templates/cookied-model-generator/salt_released-bm-b300-cicd-queens-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_released-bm-b300-cicd-queens-ovs-maas.yaml
new file mode 100644
index 0000000..a1f1792
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_released-bm-b300-cicd-queens-ovs-maas.yaml
@@ -0,0 +1,147 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# Other salt model repository parameters see in shared-salt.yaml
+{% set LAB_CONFIG_NAME = 'released-bm-b300-cicd-queens-ovs-maas' %}
+# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','released-bm-b300-cicd-queens-ovs-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','salt-context-cookiecutter-openstack_ovs.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2404') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+{%- set CISCO_PASS = os_env('CISCO_PASS', 'cisco_pass') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Upload vcompute cluster config to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: openstack_vcompute_cluster.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload vcmp config to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: vcmp.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload vcmp network definition to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: vcmpcompute.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/networking/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload env_add.yml to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: env_add.yml
+    remote_path: /root/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload infra_config_nodes_add.yml to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: infra_config_nodes_add.yml
+    remote_path: /root/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload infra_kvm_add.yml to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: infra_kvm_add.yml
+    remote_path: /root/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload openstack_init_add.yml to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: openstack_init_add.yml
+    remote_path: /root/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Modify model to add 254 compute VMs"
+  cmd: |
+    set -e;
+    set -x;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools merge-context /root/environment/env_add.yml /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/init.yml;
+    reclass-tools merge-context /root/environment/infra_config_nodes_add.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/nodes.yml;
+    reclass-tools merge-context /root/environment/infra_kvm_add.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools merge-context /root/environment/openstack_init_add.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary WR for correct bridge name according to environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-baremetal/br\_baremetal/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+    salt '*' saltutil.refresh_pillar;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Defining username and password params for IPMI access
+  cmd: |
+    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+    sed -i 's/==CISCO_PASS==/${_param:cisco_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Add user/password for IPMI access"
+  cmd: |
+    set -e;
+    set -x;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+    reclass-tools add-key parameters._param.cisco_password {{ CISCO_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/e_lab_engineer b/tcp_tests/templates/e_lab_engineer
index 91bcf2f..1c1a7bf 100644
--- a/tcp_tests/templates/e_lab_engineer
+++ b/tcp_tests/templates/e_lab_engineer
@@ -1,5 +1,4 @@
 176.74.217.64
-185.8.59.228
 5.43.225.89
 5.43.227.11
 5.43.227.19
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 50653c2..71c6fe5 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -136,7 +136,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -173,7 +173,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -210,7 +210,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -248,7 +248,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -285,7 +285,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -322,7 +322,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -359,7 +359,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:84"
             #              mode: "static"
-            #              ip: "172.16.164.9"
+            #              ip: "172.16.180.9"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -396,7 +396,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:82"
             #              mode: "static"
-            #              ip: "172.16.164.10"
+            #              ip: "172.16.180.10"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -433,7 +433,7 @@
             #            one1:
             #              mac: "0c:c4:7a:6c:bc:f6"
             #              mode: "static"
-            #              ip: "172.16.164.11"
+            #              ip: "172.16.180.11"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -470,7 +470,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -507,7 +507,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -544,7 +544,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -581,7 +581,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -618,7 +618,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -659,7 +659,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
index 561c8a1..21d8fb4 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
@@ -76,6 +76,7 @@
   retry: {count: 6, delay: 5}
   skip_fail: false
 
-{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
+{{ SHARED_WORKAROUNDS.DELETE_BOND0() }}
 {{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
 {{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
index e11335f..8b01d59 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
@@ -35,19 +35,19 @@
     default: "10.9.0.0/24"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
index d08c4c8..adac30b 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -136,7 +136,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -173,7 +173,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -210,7 +210,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -248,7 +248,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -285,7 +285,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -322,7 +322,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -359,7 +359,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:84"
             #              mode: "static"
-            #              ip: "172.16.164.9"
+            #              ip: "172.16.180.9"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -396,7 +396,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:82"
             #              mode: "static"
-            #              ip: "172.16.164.10"
+            #              ip: "172.16.180.10"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -433,7 +433,7 @@
             #            one1:
             #              mac: "0c:c4:7a:6c:bc:f6"
             #              mode: "static"
-            #              ip: "172.16.164.11"
+            #              ip: "172.16.180.11"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -470,7 +470,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -507,7 +507,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -544,7 +544,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -581,7 +581,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -618,7 +618,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -659,7 +659,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
index df263e9..43e40d4 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
@@ -76,6 +76,7 @@
   retry: {count: 6, delay: 5}
   skip_fail: false
 
-{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
+{{ SHARED_WORKAROUNDS.DELETE_BOND0() }}
 {{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
 {{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
index b02b758..8bc2a84 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
@@ -35,19 +35,19 @@
     default: "10.9.0.0/24"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
index c7f6ea6..6971e77 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
 
 runcmd:
   # Create swap
-  - fallocate -l 16G /swapfile
+  - fallocate -l 2G /swapfile
   - chmod 600 /swapfile
   - mkswap /swapfile
   - swapon /swapfile
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
index 0c92b47..928d76f 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
@@ -127,7 +127,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -932,7 +932,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
 
 runcmd:
   # Create swap
-  - fallocate -l 16G /swapfile
+  - fallocate -l 2G /swapfile
   - chmod 600 /swapfile
   - mkswap /swapfile
   - swapon /swapfile
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
index 1d24327..6319401 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/encryption-key.asc b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/encryption-key.asc
new file mode 100644
index 0000000..381eb77
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/encryption-key.asc
@@ -0,0 +1,56 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQcYBFyBRcUBEACmP/muUIwbEg6Z7dA3c9I2NadcjDHXKg/ViXwaOB4KSd9/FC0o
+KSBPccWb+1sm+zdUy2f/LC5r8RvU7yZd4Mbzz8R1DQncXg4nG7bppW7oAcHpc0jk
+pV/SvdMYxuXsrbKbpoGEquwVkbb4oTv2MLSBfGfFzBeQfiwGEWm1xPLSeXc4biLC
+FatCU7w4LS1U4BEOqRCp6lW/hQFLoX+j6rNT8TwC5AeFpKgUWuQZGOO4fZKpbvo2
+sCvF5VA1HSVXlZtzum6pL1yzLL/SoyLrYOy1KrZQmSBHG9htCZQVmvYK7U5WtWE4
+Ws5IAj+HwvgKyzXE2Srsirj1NqauQRsk+1riQk3rpDrX2BeXNUSoHR5M/RDY0gCc
+8P6heanQRnyFtjUSoovkQsydY77+QVxe0MCs+lZlg31fL+wJVG7FIbIKKwR5sj8i
+/JqhWE+t2ZzIrQ/7o7fRk7hv/u69Vb/t/Nt7fkbn53zoubqi3kNgXf6hwhTUtfW/
+lE9cc4JTzis4i/RnILUDnAwos1c0Z+tGCUo4pbiP71VfU8L259g+clPFXOIkqA9t
+L9JSZQfhH/lRj3Abs57OvZjN7/D1h8PWB+8nTB8bkoUt45SubgQb0Y9maWUcwjxw
+AcJSIk6mq8vVdBu7zOuslDjMnoUZbtJwcSwQQOnb9UUppjs3CjbcH80ttQARAQAB
+AA/9ExdprtDlJf6u2pJqxNNyInOK4p/e4VydMOJ28/PZz0iod8lzXhdK9JSWItF8
+qD9VHVG2gaErO44Wqh9EgqdbcYg8gUycA0hxy5/tI2uyDsaU5CAvEMLE/Eh8Q24j
+3UgdKK64VOnj7p4rKuYpIp55PB1zNU24rwkuOQwq3Yreb7kvLbXIHA2s+xLunGzj
+tcl9a/eSSFD2w+WcPnkvVT2QlmUvhQ12p6w++QdvBkrLa9ZPz1FFPp6AiFtLGK5e
+KW6uyV1xc9BSjujmpmPBkNIynKNpCFxtTn0uH2doMAr5kkuqIV726SfUZISNkyOa
+pHKtnAtsWHmdv9skzQIBAgAzcXTBGbdDxRj6QR+ohqbsCzfu3z9QHSbXUmxezti9
+bQqpsU1SIg8z2oDARFR6KlRzhnfpPvan+Gp9TvYsvxrXe61HpxRMdLj6Gt2Ibruf
+YHCtr1S9J5CzTTOurlIKpACUYIqgVXfgIkQzqiYX8e56PiDTUB++OqEg66i0orXB
+nbHAD2vu16CNvcaNqsak3DWkHMwmEfsuxqyUXNte0eYu9SCHtnNoYT/D7A72gK4b
+Gqg80J8ZCpo1ilIX3xUq8WsH+CoXs0X7hy6Cbi22AqnHFRYmrgoIWmRzJonp393b
+yqmTV+QsKQRpmwdX4hiH78zJLnLEUQMn8CuHAGwaJCzk4okIAMKNrIQZhkdbCCe4
+IrLuMKn4aQj3c22SMXNmu78/0cP9Rtsm3ChjzzelLO7NjvPm0nIvEcThFSIZIXCv
+iWGZCXFCKn3WtA5xWuMFNXsEQcc3AG/qRODdDSeFpo+VH/9IwppAc3zI2jxe1PRD
+G2DnheLaLIKgHunsCYxpftJDod/vRqRHeU7ulMVJfEKVxdzrCbKGiIOXSyS6KowQ
+JOxF/80ocq/25Zc/oH25Y2r/0y+xzDpOHBgU0ndrCZf2z8oOuECJTxcq83UDyJzT
+HrG/hTrU83YsQMZ0AwBrYxpzUfdH7b6y60VE19FrwmMDK6Fz8I/x4Ai0sNkI3QLR
+NntY9fsIANrB3QM8CtsdxXsFvdTEwNLsG8LMdn3loCH6Cq3ejkEKa69Uua+sB6ND
+wYOXWzyksLZJyfxIXux/hMlK/kO3ohGcEFiMUaDZndJy8IKUlDrhwcUZqm7dXMDU
+CIf0T3rOEzOXbNu3UTds3j/ruSvA5KmjzOa4Qnb41CyL5Fh7x0R8Rux3NzAn6Ecx
+Y+nAWRtI/Yz7zdL8zuHaJfbVuxAPJ+ImcXAS7cX6T9dM3tWRlam1+0Ezhdb4F8i5
+lcY7sMu95scDwhV7qOmln6wtGSkBPZgE0+TqRuELZrPvlcIRRIM42UwPWhYO2PG8
+kKd2i5teweDnhzN8+E87VV2BQhP9DA8H/0+ZiXsvaG60JGqNmWzVbB6U1qgwrFOR
+VcuzIWpdZyQR8Ok63GXuA0odoqReolba9R6fVlXchj6INBz2WY2F0twwCRPx7tRg
+Pyq4PaTA8ZYYjAVWVCd9k97gY2i80p4MPzQCnE8g4n6OWGY47pcTwSkm4HBoGoam
+igIRn3Soz7CXGF+PvSGi1T0jpwM5IWfM3IwEUPdPTIJuA2iD/9zSKDvhsP+trJ1Y
+TMe9CW3Llf5mFbHLRZ7LfMOLIngKOIxBAxHiT8wUrIRaH78wHdz8ALDsC+LNP6rK
+hKb8h/VHXaqmf0BlNjGpO7XZXfxXWJ0oTUG5Z+jKz2Ir14HYLZI1GlOA8bQlZXhh
+bXBsZS5jb20gPHNhbHQtbWFzdGVyQGV4YW1wbGUuY29tPokCTgQTAQgAOBYhBLaR
+Vrvqyq56MiGjUvXLKtw2FZsDBQJcgUXFAhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4B
+AheAAAoJEPXLKtw2FZsDpi4P/1kmvlpkbOhrL73zAPyMzYa4Yo2Pi/BoMbyEKNKO
+K3wLCdP6xLGecVIt8pANosksDSGlWAnWj36/jfgt/aZisx1u6MTYaOEHkXahxOX4
+ghDW1cTbdtz7Uy5Ah9O3WNI+ejmOpCtuc3P/XOkdttKZLuCNCs6ocgCsejpNHcFK
+vMhOhnRKV8kcBrG2QLyfSyafBtM/zV+NR4Wrng71Za8fiXHlDanmrAIyuSnD538r
+hTwSFe0C9HntwuF6W+UShN7c+jPJaKQjKbZy9fuFp33NcTSPCB5dH9yrhQvOeFQo
+dFzEabMDFVGPfUVWR+TH39dWYOsq5zFmgQAbOB/vHdmEtrYNrxX0AiCZZHQHTUb9
+oBK68V8eVeFdoRLcMORBZ2RCqkQTOQoAF7o772knltjtsymnI0XNvVC/XCnZv89Q
+/eoivrd/rMMpTFOGcys6EAnSUWx0ZG/JCkezQqnx9U219BvqKNOZ60aOeOYHKpsX
+Ha8Nr72YRmtm0UMsDjEUyLOj+o06XnN7uafMv2bZpjWh2hfOrkAbxe41z6t+78ho
+P+C5vSvp01OmAt71iq+62MXVcLVKEWDpiuZSj8m83RlY5AGIaPaGX9LKPcHdGxKw
+QSczgB/jI3G08vWaq82he6UJuYexbYe1iJXfvcx8kThwZ1nXQJm+7UsISUsh8/NZ
+x0n/
+=uxDD
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/env_add.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/env_add.yml
new file mode 100644
index 0000000..e29d197
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/env_add.yml
@@ -0,0 +1,136 @@
+parameters:
+  reclass:
+    storage:
+      node:
+        infra_kvm_node01:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                - bond0
+                require_interfaces:
+                - bond0
+        infra_kvm_node02:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                - bond0
+                require_interfaces:
+                - bond0
+        infra_kvm_node03:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                - bond0
+                require_interfaces:
+                - bond0
+        infra_kvm_node04:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                - bond0
+                require_interfaces:
+                - bond0
+        infra_kvm_node05:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                - bond0
+                require_interfaces:
+                - bond0
+        infra_kvm_node06:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                - bond0
+                require_interfaces:
+                - bond0
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/infra_config_nodes_add.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/infra_config_nodes_add.yml
new file mode 100644
index 0000000..db3c3a5
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/infra_config_nodes_add.yml
@@ -0,0 +1,42 @@
+parameters:
+  reclass:
+    storage:
+      node:
+        infra_kvm_node01:
+          params:
+            tenant_address: 10.167.12.101
+        infra_kvm_node02:
+          params:
+            tenant_address: 10.167.12.102
+        infra_kvm_node03:
+          params:
+            tenant_address: 10.167.12.103
+        infra_kvm_node04:
+          params:
+            tenant_address: 10.167.12.104
+        infra_kvm_node05:
+          params:
+            tenant_address: 10.167.12.105
+        infra_kvm_node06:
+          params:
+            tenant_address: 10.167.12.106
+        openstack_compute_rack02:
+          name: ${_param:openstack_compute_rack02_hostname}<<count>>
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.openstack.compute.vcmp
+          repeat:
+            count: 254
+            ip_ranges:
+              single_address: 10.167.10.1-10.167.10.254
+              tenant_address: 10.167.13.1-10.167.13.254
+            start: 1
+            digits: 1
+            params:
+              single_address:
+                value: <<single_address>>
+              tenant_address:
+                value: <<tenant_address>>
+          params:
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: xenial
\ No newline at end of file
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/infra_kvm_add.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/infra_kvm_add.yml
new file mode 100644
index 0000000..8d25992
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/infra_kvm_add.yml
@@ -0,0 +1,16 @@
+classes:
+- cluster.released-bm-b300-cicd-queens-ovs-maas.infra.openstack_vcompute_cluster
+parameters:
+  virt:
+    nic:
+      vcmp:
+        eth4:
+          bridge: br_mgm
+        eth3:
+          bridge: br_ctl
+        eth2:
+          bridge: br_baremetal
+        eth1:
+          bridge: br_mesh
+        eth0:
+          bridge: br_baremetal
\ No newline at end of file
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/openstack_init_add.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/openstack_init_add.yml
new file mode 100644
index 0000000..ba8cb40
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/openstack_init_add.yml
@@ -0,0 +1,1770 @@
+parameters:
+  _param:
+    openstack_compute_rack02_hostname: vcmp
+    openstack_vcmp_node3_hostname: vcmp3
+    openstack_vcmp_node3_address: 10.167.10.3
+    openstack_vcmp_node4_hostname: vcmp4
+    openstack_vcmp_node4_address: 10.167.10.4
+    openstack_vcmp_node5_hostname: vcmp5
+    openstack_vcmp_node5_address: 10.167.10.5
+    openstack_vcmp_node6_hostname: vcmp6
+    openstack_vcmp_node6_address: 10.167.10.6
+    openstack_vcmp_node7_hostname: vcmp7
+    openstack_vcmp_node7_address: 10.167.10.7
+    openstack_vcmp_node8_hostname: vcmp8
+    openstack_vcmp_node8_address: 10.167.10.8
+    openstack_vcmp_node9_hostname: vcmp9
+    openstack_vcmp_node9_address: 10.167.10.9
+    openstack_vcmp_node10_hostname: vcmp10
+    openstack_vcmp_node10_address: 10.167.10.10
+    openstack_vcmp_node11_hostname: vcmp11
+    openstack_vcmp_node11_address: 10.167.10.11
+    openstack_vcmp_node12_hostname: vcmp12
+    openstack_vcmp_node12_address: 10.167.10.12
+    openstack_vcmp_node13_hostname: vcmp13
+    openstack_vcmp_node13_address: 10.167.10.13
+    openstack_vcmp_node14_hostname: vcmp14
+    openstack_vcmp_node14_address: 10.167.10.14
+    openstack_vcmp_node15_hostname: vcmp15
+    openstack_vcmp_node15_address: 10.167.10.15
+    openstack_vcmp_node16_hostname: vcmp16
+    openstack_vcmp_node16_address: 10.167.10.16
+    openstack_vcmp_node17_hostname: vcmp17
+    openstack_vcmp_node17_address: 10.167.10.17
+    openstack_vcmp_node18_hostname: vcmp18
+    openstack_vcmp_node18_address: 10.167.10.18
+    openstack_vcmp_node19_hostname: vcmp19
+    openstack_vcmp_node19_address: 10.167.10.19
+    openstack_vcmp_node20_hostname: vcmp20
+    openstack_vcmp_node20_address: 10.167.10.20
+    openstack_vcmp_node21_hostname: vcmp21
+    openstack_vcmp_node21_address: 10.167.10.21
+    openstack_vcmp_node22_hostname: vcmp22
+    openstack_vcmp_node22_address: 10.167.10.22
+    openstack_vcmp_node23_hostname: vcmp23
+    openstack_vcmp_node23_address: 10.167.10.23
+    openstack_vcmp_node24_hostname: vcmp24
+    openstack_vcmp_node24_address: 10.167.10.24
+    openstack_vcmp_node25_hostname: vcmp25
+    openstack_vcmp_node25_address: 10.167.10.25
+    openstack_vcmp_node26_hostname: vcmp26
+    openstack_vcmp_node26_address: 10.167.10.26
+    openstack_vcmp_node27_hostname: vcmp27
+    openstack_vcmp_node27_address: 10.167.10.27
+    openstack_vcmp_node28_hostname: vcmp28
+    openstack_vcmp_node28_address: 10.167.10.28
+    openstack_vcmp_node29_hostname: vcmp29
+    openstack_vcmp_node29_address: 10.167.10.29
+    openstack_vcmp_node30_hostname: vcmp30
+    openstack_vcmp_node30_address: 10.167.10.30
+    openstack_vcmp_node31_hostname: vcmp31
+    openstack_vcmp_node31_address: 10.167.10.31
+    openstack_vcmp_node32_hostname: vcmp32
+    openstack_vcmp_node32_address: 10.167.10.32
+    openstack_vcmp_node33_hostname: vcmp33
+    openstack_vcmp_node33_address: 10.167.10.33
+    openstack_vcmp_node34_hostname: vcmp34
+    openstack_vcmp_node34_address: 10.167.10.34
+    openstack_vcmp_node35_hostname: vcmp35
+    openstack_vcmp_node35_address: 10.167.10.35
+    openstack_vcmp_node36_hostname: vcmp36
+    openstack_vcmp_node36_address: 10.167.10.36
+    openstack_vcmp_node37_hostname: vcmp37
+    openstack_vcmp_node37_address: 10.167.10.37
+    openstack_vcmp_node38_hostname: vcmp38
+    openstack_vcmp_node38_address: 10.167.10.38
+    openstack_vcmp_node39_hostname: vcmp39
+    openstack_vcmp_node39_address: 10.167.10.39
+    openstack_vcmp_node40_hostname: vcmp40
+    openstack_vcmp_node40_address: 10.167.10.40
+    openstack_vcmp_node41_hostname: vcmp41
+    openstack_vcmp_node41_address: 10.167.10.41
+    openstack_vcmp_node42_hostname: vcmp42
+    openstack_vcmp_node42_address: 10.167.10.42
+    openstack_vcmp_node43_hostname: vcmp43
+    openstack_vcmp_node43_address: 10.167.10.43
+    openstack_vcmp_node44_hostname: vcmp44
+    openstack_vcmp_node44_address: 10.167.10.44
+    openstack_vcmp_node45_hostname: vcmp45
+    openstack_vcmp_node45_address: 10.167.10.45
+    openstack_vcmp_node46_hostname: vcmp46
+    openstack_vcmp_node46_address: 10.167.10.46
+    openstack_vcmp_node47_hostname: vcmp47
+    openstack_vcmp_node47_address: 10.167.10.47
+    openstack_vcmp_node48_hostname: vcmp48
+    openstack_vcmp_node48_address: 10.167.10.48
+    openstack_vcmp_node49_hostname: vcmp49
+    openstack_vcmp_node49_address: 10.167.10.49
+    openstack_vcmp_node50_hostname: vcmp50
+    openstack_vcmp_node50_address: 10.167.10.50
+    openstack_vcmp_node51_hostname: vcmp51
+    openstack_vcmp_node51_address: 10.167.10.51
+    openstack_vcmp_node52_hostname: vcmp52
+    openstack_vcmp_node52_address: 10.167.10.52
+    openstack_vcmp_node53_hostname: vcmp53
+    openstack_vcmp_node53_address: 10.167.10.53
+    openstack_vcmp_node54_hostname: vcmp54
+    openstack_vcmp_node54_address: 10.167.10.54
+    openstack_vcmp_node55_hostname: vcmp55
+    openstack_vcmp_node55_address: 10.167.10.55
+    openstack_vcmp_node56_hostname: vcmp56
+    openstack_vcmp_node56_address: 10.167.10.56
+    openstack_vcmp_node57_hostname: vcmp57
+    openstack_vcmp_node57_address: 10.167.10.57
+    openstack_vcmp_node58_hostname: vcmp58
+    openstack_vcmp_node58_address: 10.167.10.58
+    openstack_vcmp_node59_hostname: vcmp59
+    openstack_vcmp_node59_address: 10.167.10.59
+    openstack_vcmp_node60_hostname: vcmp60
+    openstack_vcmp_node60_address: 10.167.10.60
+    openstack_vcmp_node61_hostname: vcmp61
+    openstack_vcmp_node61_address: 10.167.10.61
+    openstack_vcmp_node62_hostname: vcmp62
+    openstack_vcmp_node62_address: 10.167.10.62
+    openstack_vcmp_node63_hostname: vcmp63
+    openstack_vcmp_node63_address: 10.167.10.63
+    openstack_vcmp_node64_hostname: vcmp64
+    openstack_vcmp_node64_address: 10.167.10.64
+    openstack_vcmp_node65_hostname: vcmp65
+    openstack_vcmp_node65_address: 10.167.10.65
+    openstack_vcmp_node66_hostname: vcmp66
+    openstack_vcmp_node66_address: 10.167.10.66
+    openstack_vcmp_node67_hostname: vcmp67
+    openstack_vcmp_node67_address: 10.167.10.67
+    openstack_vcmp_node68_hostname: vcmp68
+    openstack_vcmp_node68_address: 10.167.10.68
+    openstack_vcmp_node69_hostname: vcmp69
+    openstack_vcmp_node69_address: 10.167.10.69
+    openstack_vcmp_node70_hostname: vcmp70
+    openstack_vcmp_node70_address: 10.167.10.70
+    openstack_vcmp_node71_hostname: vcmp71
+    openstack_vcmp_node71_address: 10.167.10.71
+    openstack_vcmp_node72_hostname: vcmp72
+    openstack_vcmp_node72_address: 10.167.10.72
+    openstack_vcmp_node73_hostname: vcmp73
+    openstack_vcmp_node73_address: 10.167.10.73
+    openstack_vcmp_node74_hostname: vcmp74
+    openstack_vcmp_node74_address: 10.167.10.74
+    openstack_vcmp_node75_hostname: vcmp75
+    openstack_vcmp_node75_address: 10.167.10.75
+    openstack_vcmp_node76_hostname: vcmp76
+    openstack_vcmp_node76_address: 10.167.10.76
+    openstack_vcmp_node77_hostname: vcmp77
+    openstack_vcmp_node77_address: 10.167.10.77
+    openstack_vcmp_node78_hostname: vcmp78
+    openstack_vcmp_node78_address: 10.167.10.78
+    openstack_vcmp_node79_hostname: vcmp79
+    openstack_vcmp_node79_address: 10.167.10.79
+    openstack_vcmp_node80_hostname: vcmp80
+    openstack_vcmp_node80_address: 10.167.10.80
+    openstack_vcmp_node81_hostname: vcmp81
+    openstack_vcmp_node81_address: 10.167.10.81
+    openstack_vcmp_node82_hostname: vcmp82
+    openstack_vcmp_node82_address: 10.167.10.82
+    openstack_vcmp_node83_hostname: vcmp83
+    openstack_vcmp_node83_address: 10.167.10.83
+    openstack_vcmp_node84_hostname: vcmp84
+    openstack_vcmp_node84_address: 10.167.10.84
+    openstack_vcmp_node85_hostname: vcmp85
+    openstack_vcmp_node85_address: 10.167.10.85
+    openstack_vcmp_node86_hostname: vcmp86
+    openstack_vcmp_node86_address: 10.167.10.86
+    openstack_vcmp_node87_hostname: vcmp87
+    openstack_vcmp_node87_address: 10.167.10.87
+    openstack_vcmp_node88_hostname: vcmp88
+    openstack_vcmp_node88_address: 10.167.10.88
+    openstack_vcmp_node89_hostname: vcmp89
+    openstack_vcmp_node89_address: 10.167.10.89
+    openstack_vcmp_node90_hostname: vcmp90
+    openstack_vcmp_node90_address: 10.167.10.90
+    openstack_vcmp_node91_hostname: vcmp91
+    openstack_vcmp_node91_address: 10.167.10.91
+    openstack_vcmp_node92_hostname: vcmp92
+    openstack_vcmp_node92_address: 10.167.10.92
+    openstack_vcmp_node93_hostname: vcmp93
+    openstack_vcmp_node93_address: 10.167.10.93
+    openstack_vcmp_node94_hostname: vcmp94
+    openstack_vcmp_node94_address: 10.167.10.94
+    openstack_vcmp_node95_hostname: vcmp95
+    openstack_vcmp_node95_address: 10.167.10.95
+    openstack_vcmp_node96_hostname: vcmp96
+    openstack_vcmp_node96_address: 10.167.10.96
+    openstack_vcmp_node97_hostname: vcmp97
+    openstack_vcmp_node97_address: 10.167.10.97
+    openstack_vcmp_node98_hostname: vcmp98
+    openstack_vcmp_node98_address: 10.167.10.98
+    openstack_vcmp_node99_hostname: vcmp99
+    openstack_vcmp_node99_address: 10.167.10.99
+    openstack_vcmp_node100_hostname: vcmp100
+    openstack_vcmp_node100_address: 10.167.10.100
+    openstack_vcmp_node101_hostname: vcmp101
+    openstack_vcmp_node101_address: 10.167.10.101
+    openstack_vcmp_node102_hostname: vcmp102
+    openstack_vcmp_node102_address: 10.167.10.102
+    openstack_vcmp_node103_hostname: vcmp103
+    openstack_vcmp_node103_address: 10.167.10.103
+    openstack_vcmp_node104_hostname: vcmp104
+    openstack_vcmp_node104_address: 10.167.10.104
+    openstack_vcmp_node105_hostname: vcmp105
+    openstack_vcmp_node105_address: 10.167.10.105
+    openstack_vcmp_node106_hostname: vcmp106
+    openstack_vcmp_node106_address: 10.167.10.106
+    openstack_vcmp_node107_hostname: vcmp107
+    openstack_vcmp_node107_address: 10.167.10.107
+    openstack_vcmp_node108_hostname: vcmp108
+    openstack_vcmp_node108_address: 10.167.10.108
+    openstack_vcmp_node109_hostname: vcmp109
+    openstack_vcmp_node109_address: 10.167.10.109
+    openstack_vcmp_node110_hostname: vcmp110
+    openstack_vcmp_node110_address: 10.167.10.110
+    openstack_vcmp_node111_hostname: vcmp111
+    openstack_vcmp_node111_address: 10.167.10.111
+    openstack_vcmp_node112_hostname: vcmp112
+    openstack_vcmp_node112_address: 10.167.10.112
+    openstack_vcmp_node113_hostname: vcmp113
+    openstack_vcmp_node113_address: 10.167.10.113
+    openstack_vcmp_node114_hostname: vcmp114
+    openstack_vcmp_node114_address: 10.167.10.114
+    openstack_vcmp_node115_hostname: vcmp115
+    openstack_vcmp_node115_address: 10.167.10.115
+    openstack_vcmp_node116_hostname: vcmp116
+    openstack_vcmp_node116_address: 10.167.10.116
+    openstack_vcmp_node117_hostname: vcmp117
+    openstack_vcmp_node117_address: 10.167.10.117
+    openstack_vcmp_node118_hostname: vcmp118
+    openstack_vcmp_node118_address: 10.167.10.118
+    openstack_vcmp_node119_hostname: vcmp119
+    openstack_vcmp_node119_address: 10.167.10.119
+    openstack_vcmp_node120_hostname: vcmp120
+    openstack_vcmp_node120_address: 10.167.10.120
+    openstack_vcmp_node121_hostname: vcmp121
+    openstack_vcmp_node121_address: 10.167.10.121
+    openstack_vcmp_node122_hostname: vcmp122
+    openstack_vcmp_node122_address: 10.167.10.122
+    openstack_vcmp_node123_hostname: vcmp123
+    openstack_vcmp_node123_address: 10.167.10.123
+    openstack_vcmp_node124_hostname: vcmp124
+    openstack_vcmp_node124_address: 10.167.10.124
+    openstack_vcmp_node125_hostname: vcmp125
+    openstack_vcmp_node125_address: 10.167.10.125
+    openstack_vcmp_node126_hostname: vcmp126
+    openstack_vcmp_node126_address: 10.167.10.126
+    openstack_vcmp_node127_hostname: vcmp127
+    openstack_vcmp_node127_address: 10.167.10.127
+    openstack_vcmp_node128_hostname: vcmp128
+    openstack_vcmp_node128_address: 10.167.10.128
+    openstack_vcmp_node129_hostname: vcmp129
+    openstack_vcmp_node129_address: 10.167.10.129
+    openstack_vcmp_node130_hostname: vcmp130
+    openstack_vcmp_node130_address: 10.167.10.130
+    openstack_vcmp_node131_hostname: vcmp131
+    openstack_vcmp_node131_address: 10.167.10.131
+    openstack_vcmp_node132_hostname: vcmp132
+    openstack_vcmp_node132_address: 10.167.10.132
+    openstack_vcmp_node133_hostname: vcmp133
+    openstack_vcmp_node133_address: 10.167.10.133
+    openstack_vcmp_node134_hostname: vcmp134
+    openstack_vcmp_node134_address: 10.167.10.134
+    openstack_vcmp_node135_hostname: vcmp135
+    openstack_vcmp_node135_address: 10.167.10.135
+    openstack_vcmp_node136_hostname: vcmp136
+    openstack_vcmp_node136_address: 10.167.10.136
+    openstack_vcmp_node137_hostname: vcmp137
+    openstack_vcmp_node137_address: 10.167.10.137
+    openstack_vcmp_node138_hostname: vcmp138
+    openstack_vcmp_node138_address: 10.167.10.138
+    openstack_vcmp_node139_hostname: vcmp139
+    openstack_vcmp_node139_address: 10.167.10.139
+    openstack_vcmp_node140_hostname: vcmp140
+    openstack_vcmp_node140_address: 10.167.10.140
+    openstack_vcmp_node141_hostname: vcmp141
+    openstack_vcmp_node141_address: 10.167.10.141
+    openstack_vcmp_node142_hostname: vcmp142
+    openstack_vcmp_node142_address: 10.167.10.142
+    openstack_vcmp_node143_hostname: vcmp143
+    openstack_vcmp_node143_address: 10.167.10.143
+    openstack_vcmp_node144_hostname: vcmp144
+    openstack_vcmp_node144_address: 10.167.10.144
+    openstack_vcmp_node145_hostname: vcmp145
+    openstack_vcmp_node145_address: 10.167.10.145
+    openstack_vcmp_node146_hostname: vcmp146
+    openstack_vcmp_node146_address: 10.167.10.146
+    openstack_vcmp_node147_hostname: vcmp147
+    openstack_vcmp_node147_address: 10.167.10.147
+    openstack_vcmp_node148_hostname: vcmp148
+    openstack_vcmp_node148_address: 10.167.10.148
+    openstack_vcmp_node149_hostname: vcmp149
+    openstack_vcmp_node149_address: 10.167.10.149
+    openstack_vcmp_node150_hostname: vcmp150
+    openstack_vcmp_node150_address: 10.167.10.150
+    openstack_vcmp_node151_hostname: vcmp151
+    openstack_vcmp_node151_address: 10.167.10.151
+    openstack_vcmp_node152_hostname: vcmp152
+    openstack_vcmp_node152_address: 10.167.10.152
+    openstack_vcmp_node153_hostname: vcmp153
+    openstack_vcmp_node153_address: 10.167.10.153
+    openstack_vcmp_node154_hostname: vcmp154
+    openstack_vcmp_node154_address: 10.167.10.154
+    openstack_vcmp_node155_hostname: vcmp155
+    openstack_vcmp_node155_address: 10.167.10.155
+    openstack_vcmp_node156_hostname: vcmp156
+    openstack_vcmp_node156_address: 10.167.10.156
+    openstack_vcmp_node157_hostname: vcmp157
+    openstack_vcmp_node157_address: 10.167.10.157
+    openstack_vcmp_node158_hostname: vcmp158
+    openstack_vcmp_node158_address: 10.167.10.158
+    openstack_vcmp_node159_hostname: vcmp159
+    openstack_vcmp_node159_address: 10.167.10.159
+    openstack_vcmp_node160_hostname: vcmp160
+    openstack_vcmp_node160_address: 10.167.10.160
+    openstack_vcmp_node161_hostname: vcmp161
+    openstack_vcmp_node161_address: 10.167.10.161
+    openstack_vcmp_node162_hostname: vcmp162
+    openstack_vcmp_node162_address: 10.167.10.162
+    openstack_vcmp_node163_hostname: vcmp163
+    openstack_vcmp_node163_address: 10.167.10.163
+    openstack_vcmp_node164_hostname: vcmp164
+    openstack_vcmp_node164_address: 10.167.10.164
+    openstack_vcmp_node165_hostname: vcmp165
+    openstack_vcmp_node165_address: 10.167.10.165
+    openstack_vcmp_node166_hostname: vcmp166
+    openstack_vcmp_node166_address: 10.167.10.166
+    openstack_vcmp_node167_hostname: vcmp167
+    openstack_vcmp_node167_address: 10.167.10.167
+    openstack_vcmp_node168_hostname: vcmp168
+    openstack_vcmp_node168_address: 10.167.10.168
+    openstack_vcmp_node169_hostname: vcmp169
+    openstack_vcmp_node169_address: 10.167.10.169
+    openstack_vcmp_node170_hostname: vcmp170
+    openstack_vcmp_node170_address: 10.167.10.170
+    openstack_vcmp_node171_hostname: vcmp171
+    openstack_vcmp_node171_address: 10.167.10.171
+    openstack_vcmp_node172_hostname: vcmp172
+    openstack_vcmp_node172_address: 10.167.10.172
+    openstack_vcmp_node173_hostname: vcmp173
+    openstack_vcmp_node173_address: 10.167.10.173
+    openstack_vcmp_node174_hostname: vcmp174
+    openstack_vcmp_node174_address: 10.167.10.174
+    openstack_vcmp_node175_hostname: vcmp175
+    openstack_vcmp_node175_address: 10.167.10.175
+    openstack_vcmp_node176_hostname: vcmp176
+    openstack_vcmp_node176_address: 10.167.10.176
+    openstack_vcmp_node177_hostname: vcmp177
+    openstack_vcmp_node177_address: 10.167.10.177
+    openstack_vcmp_node178_hostname: vcmp178
+    openstack_vcmp_node178_address: 10.167.10.178
+    openstack_vcmp_node179_hostname: vcmp179
+    openstack_vcmp_node179_address: 10.167.10.179
+    openstack_vcmp_node180_hostname: vcmp180
+    openstack_vcmp_node180_address: 10.167.10.180
+    openstack_vcmp_node181_hostname: vcmp181
+    openstack_vcmp_node181_address: 10.167.10.181
+    openstack_vcmp_node182_hostname: vcmp182
+    openstack_vcmp_node182_address: 10.167.10.182
+    openstack_vcmp_node183_hostname: vcmp183
+    openstack_vcmp_node183_address: 10.167.10.183
+    openstack_vcmp_node184_hostname: vcmp184
+    openstack_vcmp_node184_address: 10.167.10.184
+    openstack_vcmp_node185_hostname: vcmp185
+    openstack_vcmp_node185_address: 10.167.10.185
+    openstack_vcmp_node186_hostname: vcmp186
+    openstack_vcmp_node186_address: 10.167.10.186
+    openstack_vcmp_node187_hostname: vcmp187
+    openstack_vcmp_node187_address: 10.167.10.187
+    openstack_vcmp_node188_hostname: vcmp188
+    openstack_vcmp_node188_address: 10.167.10.188
+    openstack_vcmp_node189_hostname: vcmp189
+    openstack_vcmp_node189_address: 10.167.10.189
+    openstack_vcmp_node190_hostname: vcmp190
+    openstack_vcmp_node190_address: 10.167.10.190
+    openstack_vcmp_node191_hostname: vcmp191
+    openstack_vcmp_node191_address: 10.167.10.191
+    openstack_vcmp_node192_hostname: vcmp192
+    openstack_vcmp_node192_address: 10.167.10.192
+    openstack_vcmp_node193_hostname: vcmp193
+    openstack_vcmp_node193_address: 10.167.10.193
+    openstack_vcmp_node194_hostname: vcmp194
+    openstack_vcmp_node194_address: 10.167.10.194
+    openstack_vcmp_node195_hostname: vcmp195
+    openstack_vcmp_node195_address: 10.167.10.195
+    openstack_vcmp_node196_hostname: vcmp196
+    openstack_vcmp_node196_address: 10.167.10.196
+    openstack_vcmp_node197_hostname: vcmp197
+    openstack_vcmp_node197_address: 10.167.10.197
+    openstack_vcmp_node198_hostname: vcmp198
+    openstack_vcmp_node198_address: 10.167.10.198
+    openstack_vcmp_node199_hostname: vcmp199
+    openstack_vcmp_node199_address: 10.167.10.199
+    openstack_vcmp_node200_hostname: vcmp200
+    openstack_vcmp_node200_address: 10.167.10.200
+    openstack_vcmp_node201_hostname: vcmp201
+    openstack_vcmp_node201_address: 10.167.10.201
+    openstack_vcmp_node202_hostname: vcmp202
+    openstack_vcmp_node202_address: 10.167.10.202
+    openstack_vcmp_node203_hostname: vcmp203
+    openstack_vcmp_node203_address: 10.167.10.203
+    openstack_vcmp_node204_hostname: vcmp204
+    openstack_vcmp_node204_address: 10.167.10.204
+    openstack_vcmp_node205_hostname: vcmp205
+    openstack_vcmp_node205_address: 10.167.10.205
+    openstack_vcmp_node206_hostname: vcmp206
+    openstack_vcmp_node206_address: 10.167.10.206
+    openstack_vcmp_node207_hostname: vcmp207
+    openstack_vcmp_node207_address: 10.167.10.207
+    openstack_vcmp_node208_hostname: vcmp208
+    openstack_vcmp_node208_address: 10.167.10.208
+    openstack_vcmp_node209_hostname: vcmp209
+    openstack_vcmp_node209_address: 10.167.10.209
+    openstack_vcmp_node210_hostname: vcmp210
+    openstack_vcmp_node210_address: 10.167.10.210
+    openstack_vcmp_node211_hostname: vcmp211
+    openstack_vcmp_node211_address: 10.167.10.211
+    openstack_vcmp_node212_hostname: vcmp212
+    openstack_vcmp_node212_address: 10.167.10.212
+    openstack_vcmp_node213_hostname: vcmp213
+    openstack_vcmp_node213_address: 10.167.10.213
+    openstack_vcmp_node214_hostname: vcmp214
+    openstack_vcmp_node214_address: 10.167.10.214
+    openstack_vcmp_node215_hostname: vcmp215
+    openstack_vcmp_node215_address: 10.167.10.215
+    openstack_vcmp_node216_hostname: vcmp216
+    openstack_vcmp_node216_address: 10.167.10.216
+    openstack_vcmp_node217_hostname: vcmp217
+    openstack_vcmp_node217_address: 10.167.10.217
+    openstack_vcmp_node218_hostname: vcmp218
+    openstack_vcmp_node218_address: 10.167.10.218
+    openstack_vcmp_node219_hostname: vcmp219
+    openstack_vcmp_node219_address: 10.167.10.219
+    openstack_vcmp_node220_hostname: vcmp220
+    openstack_vcmp_node220_address: 10.167.10.220
+    openstack_vcmp_node221_hostname: vcmp221
+    openstack_vcmp_node221_address: 10.167.10.221
+    openstack_vcmp_node222_hostname: vcmp222
+    openstack_vcmp_node222_address: 10.167.10.222
+    openstack_vcmp_node223_hostname: vcmp223
+    openstack_vcmp_node223_address: 10.167.10.223
+    openstack_vcmp_node224_hostname: vcmp224
+    openstack_vcmp_node224_address: 10.167.10.224
+    openstack_vcmp_node225_hostname: vcmp225
+    openstack_vcmp_node225_address: 10.167.10.225
+    openstack_vcmp_node226_hostname: vcmp226
+    openstack_vcmp_node226_address: 10.167.10.226
+    openstack_vcmp_node227_hostname: vcmp227
+    openstack_vcmp_node227_address: 10.167.10.227
+    openstack_vcmp_node228_hostname: vcmp228
+    openstack_vcmp_node228_address: 10.167.10.228
+    openstack_vcmp_node229_hostname: vcmp229
+    openstack_vcmp_node229_address: 10.167.10.229
+    openstack_vcmp_node230_hostname: vcmp230
+    openstack_vcmp_node230_address: 10.167.10.230
+    openstack_vcmp_node231_hostname: vcmp231
+    openstack_vcmp_node231_address: 10.167.10.231
+    openstack_vcmp_node232_hostname: vcmp232
+    openstack_vcmp_node232_address: 10.167.10.232
+    openstack_vcmp_node233_hostname: vcmp233
+    openstack_vcmp_node233_address: 10.167.10.233
+    openstack_vcmp_node234_hostname: vcmp234
+    openstack_vcmp_node234_address: 10.167.10.234
+    openstack_vcmp_node235_hostname: vcmp235
+    openstack_vcmp_node235_address: 10.167.10.235
+    openstack_vcmp_node236_hostname: vcmp236
+    openstack_vcmp_node236_address: 10.167.10.236
+    openstack_vcmp_node237_hostname: vcmp237
+    openstack_vcmp_node237_address: 10.167.10.237
+    openstack_vcmp_node238_hostname: vcmp238
+    openstack_vcmp_node238_address: 10.167.10.238
+    openstack_vcmp_node239_hostname: vcmp239
+    openstack_vcmp_node239_address: 10.167.10.239
+    openstack_vcmp_node240_hostname: vcmp240
+    openstack_vcmp_node240_address: 10.167.10.240
+    openstack_vcmp_node241_hostname: vcmp241
+    openstack_vcmp_node241_address: 10.167.10.241
+    openstack_vcmp_node242_hostname: vcmp242
+    openstack_vcmp_node242_address: 10.167.10.242
+    openstack_vcmp_node243_hostname: vcmp243
+    openstack_vcmp_node243_address: 10.167.10.243
+    openstack_vcmp_node244_hostname: vcmp244
+    openstack_vcmp_node244_address: 10.167.10.244
+    openstack_vcmp_node245_hostname: vcmp245
+    openstack_vcmp_node245_address: 10.167.10.245
+    openstack_vcmp_node246_hostname: vcmp246
+    openstack_vcmp_node246_address: 10.167.10.246
+    openstack_vcmp_node247_hostname: vcmp247
+    openstack_vcmp_node247_address: 10.167.10.247
+    openstack_vcmp_node248_hostname: vcmp248
+    openstack_vcmp_node248_address: 10.167.10.248
+    openstack_vcmp_node249_hostname: vcmp249
+    openstack_vcmp_node249_address: 10.167.10.249
+    openstack_vcmp_node250_hostname: vcmp250
+    openstack_vcmp_node250_address: 10.167.10.250
+    openstack_vcmp_node251_hostname: vcmp251
+    openstack_vcmp_node251_address: 10.167.10.251
+    openstack_vcmp_node252_hostname: vcmp252
+    openstack_vcmp_node252_address: 10.167.10.252
+    openstack_vcmp_node253_hostname: vcmp253
+    openstack_vcmp_node253_address: 10.167.10.253
+    openstack_vcmp_node254_hostname: vcmp254
+    openstack_vcmp_node254_address: 10.167.10.254
+  linux:
+    network:
+      host:
+        vcmp3:
+          address: ${_param:openstack_vcmp_node3_address}
+          names:
+          - ${_param:openstack_vcmp_node3_hostname}
+          - ${_param:openstack_vcmp_node3_hostname}.${_param:cluster_domain}
+        vcmp4:
+          address: ${_param:openstack_vcmp_node4_address}
+          names:
+          - ${_param:openstack_vcmp_node4_hostname}
+          - ${_param:openstack_vcmp_node4_hostname}.${_param:cluster_domain}
+        vcmp5:
+          address: ${_param:openstack_vcmp_node5_address}
+          names:
+          - ${_param:openstack_vcmp_node5_hostname}
+          - ${_param:openstack_vcmp_node5_hostname}.${_param:cluster_domain}
+        vcmp6:
+          address: ${_param:openstack_vcmp_node6_address}
+          names:
+          - ${_param:openstack_vcmp_node6_hostname}
+          - ${_param:openstack_vcmp_node6_hostname}.${_param:cluster_domain}
+        vcmp7:
+          address: ${_param:openstack_vcmp_node7_address}
+          names:
+          - ${_param:openstack_vcmp_node7_hostname}
+          - ${_param:openstack_vcmp_node7_hostname}.${_param:cluster_domain}
+        vcmp8:
+          address: ${_param:openstack_vcmp_node8_address}
+          names:
+          - ${_param:openstack_vcmp_node8_hostname}
+          - ${_param:openstack_vcmp_node8_hostname}.${_param:cluster_domain}
+        vcmp9:
+          address: ${_param:openstack_vcmp_node9_address}
+          names:
+          - ${_param:openstack_vcmp_node9_hostname}
+          - ${_param:openstack_vcmp_node9_hostname}.${_param:cluster_domain}
+        vcmp10:
+          address: ${_param:openstack_vcmp_node10_address}
+          names:
+          - ${_param:openstack_vcmp_node10_hostname}
+          - ${_param:openstack_vcmp_node10_hostname}.${_param:cluster_domain}
+        vcmp11:
+          address: ${_param:openstack_vcmp_node11_address}
+          names:
+          - ${_param:openstack_vcmp_node11_hostname}
+          - ${_param:openstack_vcmp_node11_hostname}.${_param:cluster_domain}
+        vcmp12:
+          address: ${_param:openstack_vcmp_node12_address}
+          names:
+          - ${_param:openstack_vcmp_node12_hostname}
+          - ${_param:openstack_vcmp_node12_hostname}.${_param:cluster_domain}
+        vcmp13:
+          address: ${_param:openstack_vcmp_node13_address}
+          names:
+          - ${_param:openstack_vcmp_node13_hostname}
+          - ${_param:openstack_vcmp_node13_hostname}.${_param:cluster_domain}
+        vcmp14:
+          address: ${_param:openstack_vcmp_node14_address}
+          names:
+          - ${_param:openstack_vcmp_node14_hostname}
+          - ${_param:openstack_vcmp_node14_hostname}.${_param:cluster_domain}
+        vcmp15:
+          address: ${_param:openstack_vcmp_node15_address}
+          names:
+          - ${_param:openstack_vcmp_node15_hostname}
+          - ${_param:openstack_vcmp_node15_hostname}.${_param:cluster_domain}
+        vcmp16:
+          address: ${_param:openstack_vcmp_node16_address}
+          names:
+          - ${_param:openstack_vcmp_node16_hostname}
+          - ${_param:openstack_vcmp_node16_hostname}.${_param:cluster_domain}
+        vcmp17:
+          address: ${_param:openstack_vcmp_node17_address}
+          names:
+          - ${_param:openstack_vcmp_node17_hostname}
+          - ${_param:openstack_vcmp_node17_hostname}.${_param:cluster_domain}
+        vcmp18:
+          address: ${_param:openstack_vcmp_node18_address}
+          names:
+          - ${_param:openstack_vcmp_node18_hostname}
+          - ${_param:openstack_vcmp_node18_hostname}.${_param:cluster_domain}
+        vcmp19:
+          address: ${_param:openstack_vcmp_node19_address}
+          names:
+          - ${_param:openstack_vcmp_node19_hostname}
+          - ${_param:openstack_vcmp_node19_hostname}.${_param:cluster_domain}
+        vcmp20:
+          address: ${_param:openstack_vcmp_node20_address}
+          names:
+          - ${_param:openstack_vcmp_node20_hostname}
+          - ${_param:openstack_vcmp_node20_hostname}.${_param:cluster_domain}
+        vcmp21:
+          address: ${_param:openstack_vcmp_node21_address}
+          names:
+          - ${_param:openstack_vcmp_node21_hostname}
+          - ${_param:openstack_vcmp_node21_hostname}.${_param:cluster_domain}
+        vcmp22:
+          address: ${_param:openstack_vcmp_node22_address}
+          names:
+          - ${_param:openstack_vcmp_node22_hostname}
+          - ${_param:openstack_vcmp_node22_hostname}.${_param:cluster_domain}
+        vcmp23:
+          address: ${_param:openstack_vcmp_node23_address}
+          names:
+          - ${_param:openstack_vcmp_node23_hostname}
+          - ${_param:openstack_vcmp_node23_hostname}.${_param:cluster_domain}
+        vcmp24:
+          address: ${_param:openstack_vcmp_node24_address}
+          names:
+          - ${_param:openstack_vcmp_node24_hostname}
+          - ${_param:openstack_vcmp_node24_hostname}.${_param:cluster_domain}
+        vcmp25:
+          address: ${_param:openstack_vcmp_node25_address}
+          names:
+          - ${_param:openstack_vcmp_node25_hostname}
+          - ${_param:openstack_vcmp_node25_hostname}.${_param:cluster_domain}
+        vcmp26:
+          address: ${_param:openstack_vcmp_node26_address}
+          names:
+          - ${_param:openstack_vcmp_node26_hostname}
+          - ${_param:openstack_vcmp_node26_hostname}.${_param:cluster_domain}
+        vcmp27:
+          address: ${_param:openstack_vcmp_node27_address}
+          names:
+          - ${_param:openstack_vcmp_node27_hostname}
+          - ${_param:openstack_vcmp_node27_hostname}.${_param:cluster_domain}
+        vcmp28:
+          address: ${_param:openstack_vcmp_node28_address}
+          names:
+          - ${_param:openstack_vcmp_node28_hostname}
+          - ${_param:openstack_vcmp_node28_hostname}.${_param:cluster_domain}
+        vcmp29:
+          address: ${_param:openstack_vcmp_node29_address}
+          names:
+          - ${_param:openstack_vcmp_node29_hostname}
+          - ${_param:openstack_vcmp_node29_hostname}.${_param:cluster_domain}
+        vcmp30:
+          address: ${_param:openstack_vcmp_node30_address}
+          names:
+          - ${_param:openstack_vcmp_node30_hostname}
+          - ${_param:openstack_vcmp_node30_hostname}.${_param:cluster_domain}
+        vcmp31:
+          address: ${_param:openstack_vcmp_node31_address}
+          names:
+          - ${_param:openstack_vcmp_node31_hostname}
+          - ${_param:openstack_vcmp_node31_hostname}.${_param:cluster_domain}
+        vcmp32:
+          address: ${_param:openstack_vcmp_node32_address}
+          names:
+          - ${_param:openstack_vcmp_node32_hostname}
+          - ${_param:openstack_vcmp_node32_hostname}.${_param:cluster_domain}
+        vcmp33:
+          address: ${_param:openstack_vcmp_node33_address}
+          names:
+          - ${_param:openstack_vcmp_node33_hostname}
+          - ${_param:openstack_vcmp_node33_hostname}.${_param:cluster_domain}
+        vcmp34:
+          address: ${_param:openstack_vcmp_node34_address}
+          names:
+          - ${_param:openstack_vcmp_node34_hostname}
+          - ${_param:openstack_vcmp_node34_hostname}.${_param:cluster_domain}
+        vcmp35:
+          address: ${_param:openstack_vcmp_node35_address}
+          names:
+          - ${_param:openstack_vcmp_node35_hostname}
+          - ${_param:openstack_vcmp_node35_hostname}.${_param:cluster_domain}
+        vcmp36:
+          address: ${_param:openstack_vcmp_node36_address}
+          names:
+          - ${_param:openstack_vcmp_node36_hostname}
+          - ${_param:openstack_vcmp_node36_hostname}.${_param:cluster_domain}
+        vcmp37:
+          address: ${_param:openstack_vcmp_node37_address}
+          names:
+          - ${_param:openstack_vcmp_node37_hostname}
+          - ${_param:openstack_vcmp_node37_hostname}.${_param:cluster_domain}
+        vcmp38:
+          address: ${_param:openstack_vcmp_node38_address}
+          names:
+          - ${_param:openstack_vcmp_node38_hostname}
+          - ${_param:openstack_vcmp_node38_hostname}.${_param:cluster_domain}
+        vcmp39:
+          address: ${_param:openstack_vcmp_node39_address}
+          names:
+          - ${_param:openstack_vcmp_node39_hostname}
+          - ${_param:openstack_vcmp_node39_hostname}.${_param:cluster_domain}
+        vcmp40:
+          address: ${_param:openstack_vcmp_node40_address}
+          names:
+          - ${_param:openstack_vcmp_node40_hostname}
+          - ${_param:openstack_vcmp_node40_hostname}.${_param:cluster_domain}
+        vcmp41:
+          address: ${_param:openstack_vcmp_node41_address}
+          names:
+          - ${_param:openstack_vcmp_node41_hostname}
+          - ${_param:openstack_vcmp_node41_hostname}.${_param:cluster_domain}
+        vcmp42:
+          address: ${_param:openstack_vcmp_node42_address}
+          names:
+          - ${_param:openstack_vcmp_node42_hostname}
+          - ${_param:openstack_vcmp_node42_hostname}.${_param:cluster_domain}
+        vcmp43:
+          address: ${_param:openstack_vcmp_node43_address}
+          names:
+          - ${_param:openstack_vcmp_node43_hostname}
+          - ${_param:openstack_vcmp_node43_hostname}.${_param:cluster_domain}
+        vcmp44:
+          address: ${_param:openstack_vcmp_node44_address}
+          names:
+          - ${_param:openstack_vcmp_node44_hostname}
+          - ${_param:openstack_vcmp_node44_hostname}.${_param:cluster_domain}
+        vcmp45:
+          address: ${_param:openstack_vcmp_node45_address}
+          names:
+          - ${_param:openstack_vcmp_node45_hostname}
+          - ${_param:openstack_vcmp_node45_hostname}.${_param:cluster_domain}
+        vcmp46:
+          address: ${_param:openstack_vcmp_node46_address}
+          names:
+          - ${_param:openstack_vcmp_node46_hostname}
+          - ${_param:openstack_vcmp_node46_hostname}.${_param:cluster_domain}
+        vcmp47:
+          address: ${_param:openstack_vcmp_node47_address}
+          names:
+          - ${_param:openstack_vcmp_node47_hostname}
+          - ${_param:openstack_vcmp_node47_hostname}.${_param:cluster_domain}
+        vcmp48:
+          address: ${_param:openstack_vcmp_node48_address}
+          names:
+          - ${_param:openstack_vcmp_node48_hostname}
+          - ${_param:openstack_vcmp_node48_hostname}.${_param:cluster_domain}
+        vcmp49:
+          address: ${_param:openstack_vcmp_node49_address}
+          names:
+          - ${_param:openstack_vcmp_node49_hostname}
+          - ${_param:openstack_vcmp_node49_hostname}.${_param:cluster_domain}
+        vcmp50:
+          address: ${_param:openstack_vcmp_node50_address}
+          names:
+          - ${_param:openstack_vcmp_node50_hostname}
+          - ${_param:openstack_vcmp_node50_hostname}.${_param:cluster_domain}
+        vcmp51:
+          address: ${_param:openstack_vcmp_node51_address}
+          names:
+          - ${_param:openstack_vcmp_node51_hostname}
+          - ${_param:openstack_vcmp_node51_hostname}.${_param:cluster_domain}
+        vcmp52:
+          address: ${_param:openstack_vcmp_node52_address}
+          names:
+          - ${_param:openstack_vcmp_node52_hostname}
+          - ${_param:openstack_vcmp_node52_hostname}.${_param:cluster_domain}
+        vcmp53:
+          address: ${_param:openstack_vcmp_node53_address}
+          names:
+          - ${_param:openstack_vcmp_node53_hostname}
+          - ${_param:openstack_vcmp_node53_hostname}.${_param:cluster_domain}
+        vcmp54:
+          address: ${_param:openstack_vcmp_node54_address}
+          names:
+          - ${_param:openstack_vcmp_node54_hostname}
+          - ${_param:openstack_vcmp_node54_hostname}.${_param:cluster_domain}
+        vcmp55:
+          address: ${_param:openstack_vcmp_node55_address}
+          names:
+          - ${_param:openstack_vcmp_node55_hostname}
+          - ${_param:openstack_vcmp_node55_hostname}.${_param:cluster_domain}
+        vcmp56:
+          address: ${_param:openstack_vcmp_node56_address}
+          names:
+          - ${_param:openstack_vcmp_node56_hostname}
+          - ${_param:openstack_vcmp_node56_hostname}.${_param:cluster_domain}
+        vcmp57:
+          address: ${_param:openstack_vcmp_node57_address}
+          names:
+          - ${_param:openstack_vcmp_node57_hostname}
+          - ${_param:openstack_vcmp_node57_hostname}.${_param:cluster_domain}
+        vcmp58:
+          address: ${_param:openstack_vcmp_node58_address}
+          names:
+          - ${_param:openstack_vcmp_node58_hostname}
+          - ${_param:openstack_vcmp_node58_hostname}.${_param:cluster_domain}
+        vcmp59:
+          address: ${_param:openstack_vcmp_node59_address}
+          names:
+          - ${_param:openstack_vcmp_node59_hostname}
+          - ${_param:openstack_vcmp_node59_hostname}.${_param:cluster_domain}
+        vcmp60:
+          address: ${_param:openstack_vcmp_node60_address}
+          names:
+          - ${_param:openstack_vcmp_node60_hostname}
+          - ${_param:openstack_vcmp_node60_hostname}.${_param:cluster_domain}
+        vcmp61:
+          address: ${_param:openstack_vcmp_node61_address}
+          names:
+          - ${_param:openstack_vcmp_node61_hostname}
+          - ${_param:openstack_vcmp_node61_hostname}.${_param:cluster_domain}
+        vcmp62:
+          address: ${_param:openstack_vcmp_node62_address}
+          names:
+          - ${_param:openstack_vcmp_node62_hostname}
+          - ${_param:openstack_vcmp_node62_hostname}.${_param:cluster_domain}
+        vcmp63:
+          address: ${_param:openstack_vcmp_node63_address}
+          names:
+          - ${_param:openstack_vcmp_node63_hostname}
+          - ${_param:openstack_vcmp_node63_hostname}.${_param:cluster_domain}
+        vcmp64:
+          address: ${_param:openstack_vcmp_node64_address}
+          names:
+          - ${_param:openstack_vcmp_node64_hostname}
+          - ${_param:openstack_vcmp_node64_hostname}.${_param:cluster_domain}
+        vcmp65:
+          address: ${_param:openstack_vcmp_node65_address}
+          names:
+          - ${_param:openstack_vcmp_node65_hostname}
+          - ${_param:openstack_vcmp_node65_hostname}.${_param:cluster_domain}
+        vcmp66:
+          address: ${_param:openstack_vcmp_node66_address}
+          names:
+          - ${_param:openstack_vcmp_node66_hostname}
+          - ${_param:openstack_vcmp_node66_hostname}.${_param:cluster_domain}
+        vcmp67:
+          address: ${_param:openstack_vcmp_node67_address}
+          names:
+          - ${_param:openstack_vcmp_node67_hostname}
+          - ${_param:openstack_vcmp_node67_hostname}.${_param:cluster_domain}
+        vcmp68:
+          address: ${_param:openstack_vcmp_node68_address}
+          names:
+          - ${_param:openstack_vcmp_node68_hostname}
+          - ${_param:openstack_vcmp_node68_hostname}.${_param:cluster_domain}
+        vcmp69:
+          address: ${_param:openstack_vcmp_node69_address}
+          names:
+          - ${_param:openstack_vcmp_node69_hostname}
+          - ${_param:openstack_vcmp_node69_hostname}.${_param:cluster_domain}
+        vcmp70:
+          address: ${_param:openstack_vcmp_node70_address}
+          names:
+          - ${_param:openstack_vcmp_node70_hostname}
+          - ${_param:openstack_vcmp_node70_hostname}.${_param:cluster_domain}
+        vcmp71:
+          address: ${_param:openstack_vcmp_node71_address}
+          names:
+          - ${_param:openstack_vcmp_node71_hostname}
+          - ${_param:openstack_vcmp_node71_hostname}.${_param:cluster_domain}
+        vcmp72:
+          address: ${_param:openstack_vcmp_node72_address}
+          names:
+          - ${_param:openstack_vcmp_node72_hostname}
+          - ${_param:openstack_vcmp_node72_hostname}.${_param:cluster_domain}
+        vcmp73:
+          address: ${_param:openstack_vcmp_node73_address}
+          names:
+          - ${_param:openstack_vcmp_node73_hostname}
+          - ${_param:openstack_vcmp_node73_hostname}.${_param:cluster_domain}
+        vcmp74:
+          address: ${_param:openstack_vcmp_node74_address}
+          names:
+          - ${_param:openstack_vcmp_node74_hostname}
+          - ${_param:openstack_vcmp_node74_hostname}.${_param:cluster_domain}
+        vcmp75:
+          address: ${_param:openstack_vcmp_node75_address}
+          names:
+          - ${_param:openstack_vcmp_node75_hostname}
+          - ${_param:openstack_vcmp_node75_hostname}.${_param:cluster_domain}
+        vcmp76:
+          address: ${_param:openstack_vcmp_node76_address}
+          names:
+          - ${_param:openstack_vcmp_node76_hostname}
+          - ${_param:openstack_vcmp_node76_hostname}.${_param:cluster_domain}
+        vcmp77:
+          address: ${_param:openstack_vcmp_node77_address}
+          names:
+          - ${_param:openstack_vcmp_node77_hostname}
+          - ${_param:openstack_vcmp_node77_hostname}.${_param:cluster_domain}
+        vcmp78:
+          address: ${_param:openstack_vcmp_node78_address}
+          names:
+          - ${_param:openstack_vcmp_node78_hostname}
+          - ${_param:openstack_vcmp_node78_hostname}.${_param:cluster_domain}
+        vcmp79:
+          address: ${_param:openstack_vcmp_node79_address}
+          names:
+          - ${_param:openstack_vcmp_node79_hostname}
+          - ${_param:openstack_vcmp_node79_hostname}.${_param:cluster_domain}
+        vcmp80:
+          address: ${_param:openstack_vcmp_node80_address}
+          names:
+          - ${_param:openstack_vcmp_node80_hostname}
+          - ${_param:openstack_vcmp_node80_hostname}.${_param:cluster_domain}
+        vcmp81:
+          address: ${_param:openstack_vcmp_node81_address}
+          names:
+          - ${_param:openstack_vcmp_node81_hostname}
+          - ${_param:openstack_vcmp_node81_hostname}.${_param:cluster_domain}
+        vcmp82:
+          address: ${_param:openstack_vcmp_node82_address}
+          names:
+          - ${_param:openstack_vcmp_node82_hostname}
+          - ${_param:openstack_vcmp_node82_hostname}.${_param:cluster_domain}
+        vcmp83:
+          address: ${_param:openstack_vcmp_node83_address}
+          names:
+          - ${_param:openstack_vcmp_node83_hostname}
+          - ${_param:openstack_vcmp_node83_hostname}.${_param:cluster_domain}
+        vcmp84:
+          address: ${_param:openstack_vcmp_node84_address}
+          names:
+          - ${_param:openstack_vcmp_node84_hostname}
+          - ${_param:openstack_vcmp_node84_hostname}.${_param:cluster_domain}
+        vcmp85:
+          address: ${_param:openstack_vcmp_node85_address}
+          names:
+          - ${_param:openstack_vcmp_node85_hostname}
+          - ${_param:openstack_vcmp_node85_hostname}.${_param:cluster_domain}
+        vcmp86:
+          address: ${_param:openstack_vcmp_node86_address}
+          names:
+          - ${_param:openstack_vcmp_node86_hostname}
+          - ${_param:openstack_vcmp_node86_hostname}.${_param:cluster_domain}
+        vcmp87:
+          address: ${_param:openstack_vcmp_node87_address}
+          names:
+          - ${_param:openstack_vcmp_node87_hostname}
+          - ${_param:openstack_vcmp_node87_hostname}.${_param:cluster_domain}
+        vcmp88:
+          address: ${_param:openstack_vcmp_node88_address}
+          names:
+          - ${_param:openstack_vcmp_node88_hostname}
+          - ${_param:openstack_vcmp_node88_hostname}.${_param:cluster_domain}
+        vcmp89:
+          address: ${_param:openstack_vcmp_node89_address}
+          names:
+          - ${_param:openstack_vcmp_node89_hostname}
+          - ${_param:openstack_vcmp_node89_hostname}.${_param:cluster_domain}
+        vcmp90:
+          address: ${_param:openstack_vcmp_node90_address}
+          names:
+          - ${_param:openstack_vcmp_node90_hostname}
+          - ${_param:openstack_vcmp_node90_hostname}.${_param:cluster_domain}
+        vcmp91:
+          address: ${_param:openstack_vcmp_node91_address}
+          names:
+          - ${_param:openstack_vcmp_node91_hostname}
+          - ${_param:openstack_vcmp_node91_hostname}.${_param:cluster_domain}
+        vcmp92:
+          address: ${_param:openstack_vcmp_node92_address}
+          names:
+          - ${_param:openstack_vcmp_node92_hostname}
+          - ${_param:openstack_vcmp_node92_hostname}.${_param:cluster_domain}
+        vcmp93:
+          address: ${_param:openstack_vcmp_node93_address}
+          names:
+          - ${_param:openstack_vcmp_node93_hostname}
+          - ${_param:openstack_vcmp_node93_hostname}.${_param:cluster_domain}
+        vcmp94:
+          address: ${_param:openstack_vcmp_node94_address}
+          names:
+          - ${_param:openstack_vcmp_node94_hostname}
+          - ${_param:openstack_vcmp_node94_hostname}.${_param:cluster_domain}
+        vcmp95:
+          address: ${_param:openstack_vcmp_node95_address}
+          names:
+          - ${_param:openstack_vcmp_node95_hostname}
+          - ${_param:openstack_vcmp_node95_hostname}.${_param:cluster_domain}
+        vcmp96:
+          address: ${_param:openstack_vcmp_node96_address}
+          names:
+          - ${_param:openstack_vcmp_node96_hostname}
+          - ${_param:openstack_vcmp_node96_hostname}.${_param:cluster_domain}
+        vcmp97:
+          address: ${_param:openstack_vcmp_node97_address}
+          names:
+          - ${_param:openstack_vcmp_node97_hostname}
+          - ${_param:openstack_vcmp_node97_hostname}.${_param:cluster_domain}
+        vcmp98:
+          address: ${_param:openstack_vcmp_node98_address}
+          names:
+          - ${_param:openstack_vcmp_node98_hostname}
+          - ${_param:openstack_vcmp_node98_hostname}.${_param:cluster_domain}
+        vcmp99:
+          address: ${_param:openstack_vcmp_node99_address}
+          names:
+          - ${_param:openstack_vcmp_node99_hostname}
+          - ${_param:openstack_vcmp_node99_hostname}.${_param:cluster_domain}
+        vcmp100:
+          address: ${_param:openstack_vcmp_node100_address}
+          names:
+          - ${_param:openstack_vcmp_node100_hostname}
+          - ${_param:openstack_vcmp_node100_hostname}.${_param:cluster_domain}
+        vcmp101:
+          address: ${_param:openstack_vcmp_node101_address}
+          names:
+          - ${_param:openstack_vcmp_node101_hostname}
+          - ${_param:openstack_vcmp_node101_hostname}.${_param:cluster_domain}
+        vcmp102:
+          address: ${_param:openstack_vcmp_node102_address}
+          names:
+          - ${_param:openstack_vcmp_node102_hostname}
+          - ${_param:openstack_vcmp_node102_hostname}.${_param:cluster_domain}
+        vcmp103:
+          address: ${_param:openstack_vcmp_node103_address}
+          names:
+          - ${_param:openstack_vcmp_node103_hostname}
+          - ${_param:openstack_vcmp_node103_hostname}.${_param:cluster_domain}
+        vcmp104:
+          address: ${_param:openstack_vcmp_node104_address}
+          names:
+          - ${_param:openstack_vcmp_node104_hostname}
+          - ${_param:openstack_vcmp_node104_hostname}.${_param:cluster_domain}
+        vcmp105:
+          address: ${_param:openstack_vcmp_node105_address}
+          names:
+          - ${_param:openstack_vcmp_node105_hostname}
+          - ${_param:openstack_vcmp_node105_hostname}.${_param:cluster_domain}
+        vcmp106:
+          address: ${_param:openstack_vcmp_node106_address}
+          names:
+          - ${_param:openstack_vcmp_node106_hostname}
+          - ${_param:openstack_vcmp_node106_hostname}.${_param:cluster_domain}
+        vcmp107:
+          address: ${_param:openstack_vcmp_node107_address}
+          names:
+          - ${_param:openstack_vcmp_node107_hostname}
+          - ${_param:openstack_vcmp_node107_hostname}.${_param:cluster_domain}
+        vcmp108:
+          address: ${_param:openstack_vcmp_node108_address}
+          names:
+          - ${_param:openstack_vcmp_node108_hostname}
+          - ${_param:openstack_vcmp_node108_hostname}.${_param:cluster_domain}
+        vcmp109:
+          address: ${_param:openstack_vcmp_node109_address}
+          names:
+          - ${_param:openstack_vcmp_node109_hostname}
+          - ${_param:openstack_vcmp_node109_hostname}.${_param:cluster_domain}
+        vcmp110:
+          address: ${_param:openstack_vcmp_node110_address}
+          names:
+          - ${_param:openstack_vcmp_node110_hostname}
+          - ${_param:openstack_vcmp_node110_hostname}.${_param:cluster_domain}
+        vcmp111:
+          address: ${_param:openstack_vcmp_node111_address}
+          names:
+          - ${_param:openstack_vcmp_node111_hostname}
+          - ${_param:openstack_vcmp_node111_hostname}.${_param:cluster_domain}
+        vcmp112:
+          address: ${_param:openstack_vcmp_node112_address}
+          names:
+          - ${_param:openstack_vcmp_node112_hostname}
+          - ${_param:openstack_vcmp_node112_hostname}.${_param:cluster_domain}
+        vcmp113:
+          address: ${_param:openstack_vcmp_node113_address}
+          names:
+          - ${_param:openstack_vcmp_node113_hostname}
+          - ${_param:openstack_vcmp_node113_hostname}.${_param:cluster_domain}
+        vcmp114:
+          address: ${_param:openstack_vcmp_node114_address}
+          names:
+          - ${_param:openstack_vcmp_node114_hostname}
+          - ${_param:openstack_vcmp_node114_hostname}.${_param:cluster_domain}
+        vcmp115:
+          address: ${_param:openstack_vcmp_node115_address}
+          names:
+          - ${_param:openstack_vcmp_node115_hostname}
+          - ${_param:openstack_vcmp_node115_hostname}.${_param:cluster_domain}
+        vcmp116:
+          address: ${_param:openstack_vcmp_node116_address}
+          names:
+          - ${_param:openstack_vcmp_node116_hostname}
+          - ${_param:openstack_vcmp_node116_hostname}.${_param:cluster_domain}
+        vcmp117:
+          address: ${_param:openstack_vcmp_node117_address}
+          names:
+          - ${_param:openstack_vcmp_node117_hostname}
+          - ${_param:openstack_vcmp_node117_hostname}.${_param:cluster_domain}
+        vcmp118:
+          address: ${_param:openstack_vcmp_node118_address}
+          names:
+          - ${_param:openstack_vcmp_node118_hostname}
+          - ${_param:openstack_vcmp_node118_hostname}.${_param:cluster_domain}
+        vcmp119:
+          address: ${_param:openstack_vcmp_node119_address}
+          names:
+          - ${_param:openstack_vcmp_node119_hostname}
+          - ${_param:openstack_vcmp_node119_hostname}.${_param:cluster_domain}
+        vcmp120:
+          address: ${_param:openstack_vcmp_node120_address}
+          names:
+          - ${_param:openstack_vcmp_node120_hostname}
+          - ${_param:openstack_vcmp_node120_hostname}.${_param:cluster_domain}
+        vcmp121:
+          address: ${_param:openstack_vcmp_node121_address}
+          names:
+          - ${_param:openstack_vcmp_node121_hostname}
+          - ${_param:openstack_vcmp_node121_hostname}.${_param:cluster_domain}
+        vcmp122:
+          address: ${_param:openstack_vcmp_node122_address}
+          names:
+          - ${_param:openstack_vcmp_node122_hostname}
+          - ${_param:openstack_vcmp_node122_hostname}.${_param:cluster_domain}
+        vcmp123:
+          address: ${_param:openstack_vcmp_node123_address}
+          names:
+          - ${_param:openstack_vcmp_node123_hostname}
+          - ${_param:openstack_vcmp_node123_hostname}.${_param:cluster_domain}
+        vcmp124:
+          address: ${_param:openstack_vcmp_node124_address}
+          names:
+          - ${_param:openstack_vcmp_node124_hostname}
+          - ${_param:openstack_vcmp_node124_hostname}.${_param:cluster_domain}
+        vcmp125:
+          address: ${_param:openstack_vcmp_node125_address}
+          names:
+          - ${_param:openstack_vcmp_node125_hostname}
+          - ${_param:openstack_vcmp_node125_hostname}.${_param:cluster_domain}
+        vcmp126:
+          address: ${_param:openstack_vcmp_node126_address}
+          names:
+          - ${_param:openstack_vcmp_node126_hostname}
+          - ${_param:openstack_vcmp_node126_hostname}.${_param:cluster_domain}
+        vcmp127:
+          address: ${_param:openstack_vcmp_node127_address}
+          names:
+          - ${_param:openstack_vcmp_node127_hostname}
+          - ${_param:openstack_vcmp_node127_hostname}.${_param:cluster_domain}
+        vcmp128:
+          address: ${_param:openstack_vcmp_node128_address}
+          names:
+          - ${_param:openstack_vcmp_node128_hostname}
+          - ${_param:openstack_vcmp_node128_hostname}.${_param:cluster_domain}
+        vcmp129:
+          address: ${_param:openstack_vcmp_node129_address}
+          names:
+          - ${_param:openstack_vcmp_node129_hostname}
+          - ${_param:openstack_vcmp_node129_hostname}.${_param:cluster_domain}
+        vcmp130:
+          address: ${_param:openstack_vcmp_node130_address}
+          names:
+          - ${_param:openstack_vcmp_node130_hostname}
+          - ${_param:openstack_vcmp_node130_hostname}.${_param:cluster_domain}
+        vcmp131:
+          address: ${_param:openstack_vcmp_node131_address}
+          names:
+          - ${_param:openstack_vcmp_node131_hostname}
+          - ${_param:openstack_vcmp_node131_hostname}.${_param:cluster_domain}
+        vcmp132:
+          address: ${_param:openstack_vcmp_node132_address}
+          names:
+          - ${_param:openstack_vcmp_node132_hostname}
+          - ${_param:openstack_vcmp_node132_hostname}.${_param:cluster_domain}
+        vcmp133:
+          address: ${_param:openstack_vcmp_node133_address}
+          names:
+          - ${_param:openstack_vcmp_node133_hostname}
+          - ${_param:openstack_vcmp_node133_hostname}.${_param:cluster_domain}
+        vcmp134:
+          address: ${_param:openstack_vcmp_node134_address}
+          names:
+          - ${_param:openstack_vcmp_node134_hostname}
+          - ${_param:openstack_vcmp_node134_hostname}.${_param:cluster_domain}
+        vcmp135:
+          address: ${_param:openstack_vcmp_node135_address}
+          names:
+          - ${_param:openstack_vcmp_node135_hostname}
+          - ${_param:openstack_vcmp_node135_hostname}.${_param:cluster_domain}
+        vcmp136:
+          address: ${_param:openstack_vcmp_node136_address}
+          names:
+          - ${_param:openstack_vcmp_node136_hostname}
+          - ${_param:openstack_vcmp_node136_hostname}.${_param:cluster_domain}
+        vcmp137:
+          address: ${_param:openstack_vcmp_node137_address}
+          names:
+          - ${_param:openstack_vcmp_node137_hostname}
+          - ${_param:openstack_vcmp_node137_hostname}.${_param:cluster_domain}
+        vcmp138:
+          address: ${_param:openstack_vcmp_node138_address}
+          names:
+          - ${_param:openstack_vcmp_node138_hostname}
+          - ${_param:openstack_vcmp_node138_hostname}.${_param:cluster_domain}
+        vcmp139:
+          address: ${_param:openstack_vcmp_node139_address}
+          names:
+          - ${_param:openstack_vcmp_node139_hostname}
+          - ${_param:openstack_vcmp_node139_hostname}.${_param:cluster_domain}
+        vcmp140:
+          address: ${_param:openstack_vcmp_node140_address}
+          names:
+          - ${_param:openstack_vcmp_node140_hostname}
+          - ${_param:openstack_vcmp_node140_hostname}.${_param:cluster_domain}
+        vcmp141:
+          address: ${_param:openstack_vcmp_node141_address}
+          names:
+          - ${_param:openstack_vcmp_node141_hostname}
+          - ${_param:openstack_vcmp_node141_hostname}.${_param:cluster_domain}
+        vcmp142:
+          address: ${_param:openstack_vcmp_node142_address}
+          names:
+          - ${_param:openstack_vcmp_node142_hostname}
+          - ${_param:openstack_vcmp_node142_hostname}.${_param:cluster_domain}
+        vcmp143:
+          address: ${_param:openstack_vcmp_node143_address}
+          names:
+          - ${_param:openstack_vcmp_node143_hostname}
+          - ${_param:openstack_vcmp_node143_hostname}.${_param:cluster_domain}
+        vcmp144:
+          address: ${_param:openstack_vcmp_node144_address}
+          names:
+          - ${_param:openstack_vcmp_node144_hostname}
+          - ${_param:openstack_vcmp_node144_hostname}.${_param:cluster_domain}
+        vcmp145:
+          address: ${_param:openstack_vcmp_node145_address}
+          names:
+          - ${_param:openstack_vcmp_node145_hostname}
+          - ${_param:openstack_vcmp_node145_hostname}.${_param:cluster_domain}
+        vcmp146:
+          address: ${_param:openstack_vcmp_node146_address}
+          names:
+          - ${_param:openstack_vcmp_node146_hostname}
+          - ${_param:openstack_vcmp_node146_hostname}.${_param:cluster_domain}
+        vcmp147:
+          address: ${_param:openstack_vcmp_node147_address}
+          names:
+          - ${_param:openstack_vcmp_node147_hostname}
+          - ${_param:openstack_vcmp_node147_hostname}.${_param:cluster_domain}
+        vcmp148:
+          address: ${_param:openstack_vcmp_node148_address}
+          names:
+          - ${_param:openstack_vcmp_node148_hostname}
+          - ${_param:openstack_vcmp_node148_hostname}.${_param:cluster_domain}
+        vcmp149:
+          address: ${_param:openstack_vcmp_node149_address}
+          names:
+          - ${_param:openstack_vcmp_node149_hostname}
+          - ${_param:openstack_vcmp_node149_hostname}.${_param:cluster_domain}
+        vcmp150:
+          address: ${_param:openstack_vcmp_node150_address}
+          names:
+          - ${_param:openstack_vcmp_node150_hostname}
+          - ${_param:openstack_vcmp_node150_hostname}.${_param:cluster_domain}
+        vcmp151:
+          address: ${_param:openstack_vcmp_node151_address}
+          names:
+          - ${_param:openstack_vcmp_node151_hostname}
+          - ${_param:openstack_vcmp_node151_hostname}.${_param:cluster_domain}
+        vcmp152:
+          address: ${_param:openstack_vcmp_node152_address}
+          names:
+          - ${_param:openstack_vcmp_node152_hostname}
+          - ${_param:openstack_vcmp_node152_hostname}.${_param:cluster_domain}
+        vcmp153:
+          address: ${_param:openstack_vcmp_node153_address}
+          names:
+          - ${_param:openstack_vcmp_node153_hostname}
+          - ${_param:openstack_vcmp_node153_hostname}.${_param:cluster_domain}
+        vcmp154:
+          address: ${_param:openstack_vcmp_node154_address}
+          names:
+          - ${_param:openstack_vcmp_node154_hostname}
+          - ${_param:openstack_vcmp_node154_hostname}.${_param:cluster_domain}
+        vcmp155:
+          address: ${_param:openstack_vcmp_node155_address}
+          names:
+          - ${_param:openstack_vcmp_node155_hostname}
+          - ${_param:openstack_vcmp_node155_hostname}.${_param:cluster_domain}
+        vcmp156:
+          address: ${_param:openstack_vcmp_node156_address}
+          names:
+          - ${_param:openstack_vcmp_node156_hostname}
+          - ${_param:openstack_vcmp_node156_hostname}.${_param:cluster_domain}
+        vcmp157:
+          address: ${_param:openstack_vcmp_node157_address}
+          names:
+          - ${_param:openstack_vcmp_node157_hostname}
+          - ${_param:openstack_vcmp_node157_hostname}.${_param:cluster_domain}
+        vcmp158:
+          address: ${_param:openstack_vcmp_node158_address}
+          names:
+          - ${_param:openstack_vcmp_node158_hostname}
+          - ${_param:openstack_vcmp_node158_hostname}.${_param:cluster_domain}
+        vcmp159:
+          address: ${_param:openstack_vcmp_node159_address}
+          names:
+          - ${_param:openstack_vcmp_node159_hostname}
+          - ${_param:openstack_vcmp_node159_hostname}.${_param:cluster_domain}
+        vcmp160:
+          address: ${_param:openstack_vcmp_node160_address}
+          names:
+          - ${_param:openstack_vcmp_node160_hostname}
+          - ${_param:openstack_vcmp_node160_hostname}.${_param:cluster_domain}
+        vcmp161:
+          address: ${_param:openstack_vcmp_node161_address}
+          names:
+          - ${_param:openstack_vcmp_node161_hostname}
+          - ${_param:openstack_vcmp_node161_hostname}.${_param:cluster_domain}
+        vcmp162:
+          address: ${_param:openstack_vcmp_node162_address}
+          names:
+          - ${_param:openstack_vcmp_node162_hostname}
+          - ${_param:openstack_vcmp_node162_hostname}.${_param:cluster_domain}
+        vcmp163:
+          address: ${_param:openstack_vcmp_node163_address}
+          names:
+          - ${_param:openstack_vcmp_node163_hostname}
+          - ${_param:openstack_vcmp_node163_hostname}.${_param:cluster_domain}
+        vcmp164:
+          address: ${_param:openstack_vcmp_node164_address}
+          names:
+          - ${_param:openstack_vcmp_node164_hostname}
+          - ${_param:openstack_vcmp_node164_hostname}.${_param:cluster_domain}
+        vcmp165:
+          address: ${_param:openstack_vcmp_node165_address}
+          names:
+          - ${_param:openstack_vcmp_node165_hostname}
+          - ${_param:openstack_vcmp_node165_hostname}.${_param:cluster_domain}
+        vcmp166:
+          address: ${_param:openstack_vcmp_node166_address}
+          names:
+          - ${_param:openstack_vcmp_node166_hostname}
+          - ${_param:openstack_vcmp_node166_hostname}.${_param:cluster_domain}
+        vcmp167:
+          address: ${_param:openstack_vcmp_node167_address}
+          names:
+          - ${_param:openstack_vcmp_node167_hostname}
+          - ${_param:openstack_vcmp_node167_hostname}.${_param:cluster_domain}
+        vcmp168:
+          address: ${_param:openstack_vcmp_node168_address}
+          names:
+          - ${_param:openstack_vcmp_node168_hostname}
+          - ${_param:openstack_vcmp_node168_hostname}.${_param:cluster_domain}
+        vcmp169:
+          address: ${_param:openstack_vcmp_node169_address}
+          names:
+          - ${_param:openstack_vcmp_node169_hostname}
+          - ${_param:openstack_vcmp_node169_hostname}.${_param:cluster_domain}
+        vcmp170:
+          address: ${_param:openstack_vcmp_node170_address}
+          names:
+          - ${_param:openstack_vcmp_node170_hostname}
+          - ${_param:openstack_vcmp_node170_hostname}.${_param:cluster_domain}
+        vcmp171:
+          address: ${_param:openstack_vcmp_node171_address}
+          names:
+          - ${_param:openstack_vcmp_node171_hostname}
+          - ${_param:openstack_vcmp_node171_hostname}.${_param:cluster_domain}
+        vcmp172:
+          address: ${_param:openstack_vcmp_node172_address}
+          names:
+          - ${_param:openstack_vcmp_node172_hostname}
+          - ${_param:openstack_vcmp_node172_hostname}.${_param:cluster_domain}
+        vcmp173:
+          address: ${_param:openstack_vcmp_node173_address}
+          names:
+          - ${_param:openstack_vcmp_node173_hostname}
+          - ${_param:openstack_vcmp_node173_hostname}.${_param:cluster_domain}
+        vcmp174:
+          address: ${_param:openstack_vcmp_node174_address}
+          names:
+          - ${_param:openstack_vcmp_node174_hostname}
+          - ${_param:openstack_vcmp_node174_hostname}.${_param:cluster_domain}
+        vcmp175:
+          address: ${_param:openstack_vcmp_node175_address}
+          names:
+          - ${_param:openstack_vcmp_node175_hostname}
+          - ${_param:openstack_vcmp_node175_hostname}.${_param:cluster_domain}
+        vcmp176:
+          address: ${_param:openstack_vcmp_node176_address}
+          names:
+          - ${_param:openstack_vcmp_node176_hostname}
+          - ${_param:openstack_vcmp_node176_hostname}.${_param:cluster_domain}
+        vcmp177:
+          address: ${_param:openstack_vcmp_node177_address}
+          names:
+          - ${_param:openstack_vcmp_node177_hostname}
+          - ${_param:openstack_vcmp_node177_hostname}.${_param:cluster_domain}
+        vcmp178:
+          address: ${_param:openstack_vcmp_node178_address}
+          names:
+          - ${_param:openstack_vcmp_node178_hostname}
+          - ${_param:openstack_vcmp_node178_hostname}.${_param:cluster_domain}
+        vcmp179:
+          address: ${_param:openstack_vcmp_node179_address}
+          names:
+          - ${_param:openstack_vcmp_node179_hostname}
+          - ${_param:openstack_vcmp_node179_hostname}.${_param:cluster_domain}
+        vcmp180:
+          address: ${_param:openstack_vcmp_node180_address}
+          names:
+          - ${_param:openstack_vcmp_node180_hostname}
+          - ${_param:openstack_vcmp_node180_hostname}.${_param:cluster_domain}
+        vcmp181:
+          address: ${_param:openstack_vcmp_node181_address}
+          names:
+          - ${_param:openstack_vcmp_node181_hostname}
+          - ${_param:openstack_vcmp_node181_hostname}.${_param:cluster_domain}
+        vcmp182:
+          address: ${_param:openstack_vcmp_node182_address}
+          names:
+          - ${_param:openstack_vcmp_node182_hostname}
+          - ${_param:openstack_vcmp_node182_hostname}.${_param:cluster_domain}
+        vcmp183:
+          address: ${_param:openstack_vcmp_node183_address}
+          names:
+          - ${_param:openstack_vcmp_node183_hostname}
+          - ${_param:openstack_vcmp_node183_hostname}.${_param:cluster_domain}
+        vcmp184:
+          address: ${_param:openstack_vcmp_node184_address}
+          names:
+          - ${_param:openstack_vcmp_node184_hostname}
+          - ${_param:openstack_vcmp_node184_hostname}.${_param:cluster_domain}
+        vcmp185:
+          address: ${_param:openstack_vcmp_node185_address}
+          names:
+          - ${_param:openstack_vcmp_node185_hostname}
+          - ${_param:openstack_vcmp_node185_hostname}.${_param:cluster_domain}
+        vcmp186:
+          address: ${_param:openstack_vcmp_node186_address}
+          names:
+          - ${_param:openstack_vcmp_node186_hostname}
+          - ${_param:openstack_vcmp_node186_hostname}.${_param:cluster_domain}
+        vcmp187:
+          address: ${_param:openstack_vcmp_node187_address}
+          names:
+          - ${_param:openstack_vcmp_node187_hostname}
+          - ${_param:openstack_vcmp_node187_hostname}.${_param:cluster_domain}
+        vcmp188:
+          address: ${_param:openstack_vcmp_node188_address}
+          names:
+          - ${_param:openstack_vcmp_node188_hostname}
+          - ${_param:openstack_vcmp_node188_hostname}.${_param:cluster_domain}
+        vcmp189:
+          address: ${_param:openstack_vcmp_node189_address}
+          names:
+          - ${_param:openstack_vcmp_node189_hostname}
+          - ${_param:openstack_vcmp_node189_hostname}.${_param:cluster_domain}
+        vcmp190:
+          address: ${_param:openstack_vcmp_node190_address}
+          names:
+          - ${_param:openstack_vcmp_node190_hostname}
+          - ${_param:openstack_vcmp_node190_hostname}.${_param:cluster_domain}
+        vcmp191:
+          address: ${_param:openstack_vcmp_node191_address}
+          names:
+          - ${_param:openstack_vcmp_node191_hostname}
+          - ${_param:openstack_vcmp_node191_hostname}.${_param:cluster_domain}
+        vcmp192:
+          address: ${_param:openstack_vcmp_node192_address}
+          names:
+          - ${_param:openstack_vcmp_node192_hostname}
+          - ${_param:openstack_vcmp_node192_hostname}.${_param:cluster_domain}
+        vcmp193:
+          address: ${_param:openstack_vcmp_node193_address}
+          names:
+          - ${_param:openstack_vcmp_node193_hostname}
+          - ${_param:openstack_vcmp_node193_hostname}.${_param:cluster_domain}
+        vcmp194:
+          address: ${_param:openstack_vcmp_node194_address}
+          names:
+          - ${_param:openstack_vcmp_node194_hostname}
+          - ${_param:openstack_vcmp_node194_hostname}.${_param:cluster_domain}
+        vcmp195:
+          address: ${_param:openstack_vcmp_node195_address}
+          names:
+          - ${_param:openstack_vcmp_node195_hostname}
+          - ${_param:openstack_vcmp_node195_hostname}.${_param:cluster_domain}
+        vcmp196:
+          address: ${_param:openstack_vcmp_node196_address}
+          names:
+          - ${_param:openstack_vcmp_node196_hostname}
+          - ${_param:openstack_vcmp_node196_hostname}.${_param:cluster_domain}
+        vcmp197:
+          address: ${_param:openstack_vcmp_node197_address}
+          names:
+          - ${_param:openstack_vcmp_node197_hostname}
+          - ${_param:openstack_vcmp_node197_hostname}.${_param:cluster_domain}
+        vcmp198:
+          address: ${_param:openstack_vcmp_node198_address}
+          names:
+          - ${_param:openstack_vcmp_node198_hostname}
+          - ${_param:openstack_vcmp_node198_hostname}.${_param:cluster_domain}
+        vcmp199:
+          address: ${_param:openstack_vcmp_node199_address}
+          names:
+          - ${_param:openstack_vcmp_node199_hostname}
+          - ${_param:openstack_vcmp_node199_hostname}.${_param:cluster_domain}
+        vcmp200:
+          address: ${_param:openstack_vcmp_node200_address}
+          names:
+          - ${_param:openstack_vcmp_node200_hostname}
+          - ${_param:openstack_vcmp_node200_hostname}.${_param:cluster_domain}
+        vcmp201:
+          address: ${_param:openstack_vcmp_node201_address}
+          names:
+          - ${_param:openstack_vcmp_node201_hostname}
+          - ${_param:openstack_vcmp_node201_hostname}.${_param:cluster_domain}
+        vcmp202:
+          address: ${_param:openstack_vcmp_node202_address}
+          names:
+          - ${_param:openstack_vcmp_node202_hostname}
+          - ${_param:openstack_vcmp_node202_hostname}.${_param:cluster_domain}
+        vcmp203:
+          address: ${_param:openstack_vcmp_node203_address}
+          names:
+          - ${_param:openstack_vcmp_node203_hostname}
+          - ${_param:openstack_vcmp_node203_hostname}.${_param:cluster_domain}
+        vcmp204:
+          address: ${_param:openstack_vcmp_node204_address}
+          names:
+          - ${_param:openstack_vcmp_node204_hostname}
+          - ${_param:openstack_vcmp_node204_hostname}.${_param:cluster_domain}
+        vcmp205:
+          address: ${_param:openstack_vcmp_node205_address}
+          names:
+          - ${_param:openstack_vcmp_node205_hostname}
+          - ${_param:openstack_vcmp_node205_hostname}.${_param:cluster_domain}
+        vcmp206:
+          address: ${_param:openstack_vcmp_node206_address}
+          names:
+          - ${_param:openstack_vcmp_node206_hostname}
+          - ${_param:openstack_vcmp_node206_hostname}.${_param:cluster_domain}
+        vcmp207:
+          address: ${_param:openstack_vcmp_node207_address}
+          names:
+          - ${_param:openstack_vcmp_node207_hostname}
+          - ${_param:openstack_vcmp_node207_hostname}.${_param:cluster_domain}
+        vcmp208:
+          address: ${_param:openstack_vcmp_node208_address}
+          names:
+          - ${_param:openstack_vcmp_node208_hostname}
+          - ${_param:openstack_vcmp_node208_hostname}.${_param:cluster_domain}
+        vcmp209:
+          address: ${_param:openstack_vcmp_node209_address}
+          names:
+          - ${_param:openstack_vcmp_node209_hostname}
+          - ${_param:openstack_vcmp_node209_hostname}.${_param:cluster_domain}
+        vcmp210:
+          address: ${_param:openstack_vcmp_node210_address}
+          names:
+          - ${_param:openstack_vcmp_node210_hostname}
+          - ${_param:openstack_vcmp_node210_hostname}.${_param:cluster_domain}
+        vcmp211:
+          address: ${_param:openstack_vcmp_node211_address}
+          names:
+          - ${_param:openstack_vcmp_node211_hostname}
+          - ${_param:openstack_vcmp_node211_hostname}.${_param:cluster_domain}
+        vcmp212:
+          address: ${_param:openstack_vcmp_node212_address}
+          names:
+          - ${_param:openstack_vcmp_node212_hostname}
+          - ${_param:openstack_vcmp_node212_hostname}.${_param:cluster_domain}
+        vcmp213:
+          address: ${_param:openstack_vcmp_node213_address}
+          names:
+          - ${_param:openstack_vcmp_node213_hostname}
+          - ${_param:openstack_vcmp_node213_hostname}.${_param:cluster_domain}
+        vcmp214:
+          address: ${_param:openstack_vcmp_node214_address}
+          names:
+          - ${_param:openstack_vcmp_node214_hostname}
+          - ${_param:openstack_vcmp_node214_hostname}.${_param:cluster_domain}
+        vcmp215:
+          address: ${_param:openstack_vcmp_node215_address}
+          names:
+          - ${_param:openstack_vcmp_node215_hostname}
+          - ${_param:openstack_vcmp_node215_hostname}.${_param:cluster_domain}
+        vcmp216:
+          address: ${_param:openstack_vcmp_node216_address}
+          names:
+          - ${_param:openstack_vcmp_node216_hostname}
+          - ${_param:openstack_vcmp_node216_hostname}.${_param:cluster_domain}
+        vcmp217:
+          address: ${_param:openstack_vcmp_node217_address}
+          names:
+          - ${_param:openstack_vcmp_node217_hostname}
+          - ${_param:openstack_vcmp_node217_hostname}.${_param:cluster_domain}
+        vcmp218:
+          address: ${_param:openstack_vcmp_node218_address}
+          names:
+          - ${_param:openstack_vcmp_node218_hostname}
+          - ${_param:openstack_vcmp_node218_hostname}.${_param:cluster_domain}
+        vcmp219:
+          address: ${_param:openstack_vcmp_node219_address}
+          names:
+          - ${_param:openstack_vcmp_node219_hostname}
+          - ${_param:openstack_vcmp_node219_hostname}.${_param:cluster_domain}
+        vcmp220:
+          address: ${_param:openstack_vcmp_node220_address}
+          names:
+          - ${_param:openstack_vcmp_node220_hostname}
+          - ${_param:openstack_vcmp_node220_hostname}.${_param:cluster_domain}
+        vcmp221:
+          address: ${_param:openstack_vcmp_node221_address}
+          names:
+          - ${_param:openstack_vcmp_node221_hostname}
+          - ${_param:openstack_vcmp_node221_hostname}.${_param:cluster_domain}
+        vcmp222:
+          address: ${_param:openstack_vcmp_node222_address}
+          names:
+          - ${_param:openstack_vcmp_node222_hostname}
+          - ${_param:openstack_vcmp_node222_hostname}.${_param:cluster_domain}
+        vcmp223:
+          address: ${_param:openstack_vcmp_node223_address}
+          names:
+          - ${_param:openstack_vcmp_node223_hostname}
+          - ${_param:openstack_vcmp_node223_hostname}.${_param:cluster_domain}
+        vcmp224:
+          address: ${_param:openstack_vcmp_node224_address}
+          names:
+          - ${_param:openstack_vcmp_node224_hostname}
+          - ${_param:openstack_vcmp_node224_hostname}.${_param:cluster_domain}
+        vcmp225:
+          address: ${_param:openstack_vcmp_node225_address}
+          names:
+          - ${_param:openstack_vcmp_node225_hostname}
+          - ${_param:openstack_vcmp_node225_hostname}.${_param:cluster_domain}
+        vcmp226:
+          address: ${_param:openstack_vcmp_node226_address}
+          names:
+          - ${_param:openstack_vcmp_node226_hostname}
+          - ${_param:openstack_vcmp_node226_hostname}.${_param:cluster_domain}
+        vcmp227:
+          address: ${_param:openstack_vcmp_node227_address}
+          names:
+          - ${_param:openstack_vcmp_node227_hostname}
+          - ${_param:openstack_vcmp_node227_hostname}.${_param:cluster_domain}
+        vcmp228:
+          address: ${_param:openstack_vcmp_node228_address}
+          names:
+          - ${_param:openstack_vcmp_node228_hostname}
+          - ${_param:openstack_vcmp_node228_hostname}.${_param:cluster_domain}
+        vcmp229:
+          address: ${_param:openstack_vcmp_node229_address}
+          names:
+          - ${_param:openstack_vcmp_node229_hostname}
+          - ${_param:openstack_vcmp_node229_hostname}.${_param:cluster_domain}
+        vcmp230:
+          address: ${_param:openstack_vcmp_node230_address}
+          names:
+          - ${_param:openstack_vcmp_node230_hostname}
+          - ${_param:openstack_vcmp_node230_hostname}.${_param:cluster_domain}
+        vcmp231:
+          address: ${_param:openstack_vcmp_node231_address}
+          names:
+          - ${_param:openstack_vcmp_node231_hostname}
+          - ${_param:openstack_vcmp_node231_hostname}.${_param:cluster_domain}
+        vcmp232:
+          address: ${_param:openstack_vcmp_node232_address}
+          names:
+          - ${_param:openstack_vcmp_node232_hostname}
+          - ${_param:openstack_vcmp_node232_hostname}.${_param:cluster_domain}
+        vcmp233:
+          address: ${_param:openstack_vcmp_node233_address}
+          names:
+          - ${_param:openstack_vcmp_node233_hostname}
+          - ${_param:openstack_vcmp_node233_hostname}.${_param:cluster_domain}
+        vcmp234:
+          address: ${_param:openstack_vcmp_node234_address}
+          names:
+          - ${_param:openstack_vcmp_node234_hostname}
+          - ${_param:openstack_vcmp_node234_hostname}.${_param:cluster_domain}
+        vcmp235:
+          address: ${_param:openstack_vcmp_node235_address}
+          names:
+          - ${_param:openstack_vcmp_node235_hostname}
+          - ${_param:openstack_vcmp_node235_hostname}.${_param:cluster_domain}
+        vcmp236:
+          address: ${_param:openstack_vcmp_node236_address}
+          names:
+          - ${_param:openstack_vcmp_node236_hostname}
+          - ${_param:openstack_vcmp_node236_hostname}.${_param:cluster_domain}
+        vcmp237:
+          address: ${_param:openstack_vcmp_node237_address}
+          names:
+          - ${_param:openstack_vcmp_node237_hostname}
+          - ${_param:openstack_vcmp_node237_hostname}.${_param:cluster_domain}
+        vcmp238:
+          address: ${_param:openstack_vcmp_node238_address}
+          names:
+          - ${_param:openstack_vcmp_node238_hostname}
+          - ${_param:openstack_vcmp_node238_hostname}.${_param:cluster_domain}
+        vcmp239:
+          address: ${_param:openstack_vcmp_node239_address}
+          names:
+          - ${_param:openstack_vcmp_node239_hostname}
+          - ${_param:openstack_vcmp_node239_hostname}.${_param:cluster_domain}
+        vcmp240:
+          address: ${_param:openstack_vcmp_node240_address}
+          names:
+          - ${_param:openstack_vcmp_node240_hostname}
+          - ${_param:openstack_vcmp_node240_hostname}.${_param:cluster_domain}
+        vcmp241:
+          address: ${_param:openstack_vcmp_node241_address}
+          names:
+          - ${_param:openstack_vcmp_node241_hostname}
+          - ${_param:openstack_vcmp_node241_hostname}.${_param:cluster_domain}
+        vcmp242:
+          address: ${_param:openstack_vcmp_node242_address}
+          names:
+          - ${_param:openstack_vcmp_node242_hostname}
+          - ${_param:openstack_vcmp_node242_hostname}.${_param:cluster_domain}
+        vcmp243:
+          address: ${_param:openstack_vcmp_node243_address}
+          names:
+          - ${_param:openstack_vcmp_node243_hostname}
+          - ${_param:openstack_vcmp_node243_hostname}.${_param:cluster_domain}
+        vcmp244:
+          address: ${_param:openstack_vcmp_node244_address}
+          names:
+          - ${_param:openstack_vcmp_node244_hostname}
+          - ${_param:openstack_vcmp_node244_hostname}.${_param:cluster_domain}
+        vcmp245:
+          address: ${_param:openstack_vcmp_node245_address}
+          names:
+          - ${_param:openstack_vcmp_node245_hostname}
+          - ${_param:openstack_vcmp_node245_hostname}.${_param:cluster_domain}
+        vcmp246:
+          address: ${_param:openstack_vcmp_node246_address}
+          names:
+          - ${_param:openstack_vcmp_node246_hostname}
+          - ${_param:openstack_vcmp_node246_hostname}.${_param:cluster_domain}
+        vcmp247:
+          address: ${_param:openstack_vcmp_node247_address}
+          names:
+          - ${_param:openstack_vcmp_node247_hostname}
+          - ${_param:openstack_vcmp_node247_hostname}.${_param:cluster_domain}
+        vcmp248:
+          address: ${_param:openstack_vcmp_node248_address}
+          names:
+          - ${_param:openstack_vcmp_node248_hostname}
+          - ${_param:openstack_vcmp_node248_hostname}.${_param:cluster_domain}
+        vcmp249:
+          address: ${_param:openstack_vcmp_node249_address}
+          names:
+          - ${_param:openstack_vcmp_node249_hostname}
+          - ${_param:openstack_vcmp_node249_hostname}.${_param:cluster_domain}
+        vcmp250:
+          address: ${_param:openstack_vcmp_node250_address}
+          names:
+          - ${_param:openstack_vcmp_node250_hostname}
+          - ${_param:openstack_vcmp_node250_hostname}.${_param:cluster_domain}
+        vcmp251:
+          address: ${_param:openstack_vcmp_node251_address}
+          names:
+          - ${_param:openstack_vcmp_node251_hostname}
+          - ${_param:openstack_vcmp_node251_hostname}.${_param:cluster_domain}
+        vcmp252:
+          address: ${_param:openstack_vcmp_node252_address}
+          names:
+          - ${_param:openstack_vcmp_node252_hostname}
+          - ${_param:openstack_vcmp_node252_hostname}.${_param:cluster_domain}
+        vcmp253:
+          address: ${_param:openstack_vcmp_node253_address}
+          names:
+          - ${_param:openstack_vcmp_node253_hostname}
+          - ${_param:openstack_vcmp_node253_hostname}.${_param:cluster_domain}
+        vcmp254:
+          address: ${_param:openstack_vcmp_node254_address}
+          names:
+          - ${_param:openstack_vcmp_node254_hostname}
+          - ${_param:openstack_vcmp_node254_hostname}.${_param:cluster_domain}
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/openstack_vcompute_cluster.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/openstack_vcompute_cluster.yml
new file mode 100644
index 0000000..f5a46f8
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/openstack_vcompute_cluster.yml
@@ -0,0 +1,1788 @@
+parameters:
+  _param:
+    openstack_database_backend_image: ${_param:salt_control_xenial_image_backend}
+    salt_control_cluster_node_cloud_init_openstack_database:
+      user_data:
+        write_files:
+        - content: |
+            ${salt:control:size:openstack.database:image_layout}
+          owner: root:root
+          path: /usr/share/growlvm/image-layout.yml
+  salt:
+    control:
+      size:
+        openstack.database:  # NOTE(review): vcompute nodes reuse the "openstack.database" size/cloud-init naming — looks copy-pasted from a database cluster template; confirm intended
+          cpu: 2
+          ram: 2048
+          disk_profile: small
+          net_profile: vcmp
+          image_layout: ${_param:salt_control_size_image_layout_openstack_database}
+      cluster:
+        internal:
+          domain: ${_param:cluster_domain}
+          engine: virt
+          node:
+            vcmp3:
+              name: ${_param:openstack_vcmp_node3_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp4:
+              name: ${_param:openstack_vcmp_node4_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp5:
+              name: ${_param:openstack_vcmp_node5_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp6:
+              name: ${_param:openstack_vcmp_node6_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp7:
+              name: ${_param:openstack_vcmp_node7_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp8:
+              name: ${_param:openstack_vcmp_node8_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp9:
+              name: ${_param:openstack_vcmp_node9_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp10:
+              name: ${_param:openstack_vcmp_node10_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}  # NOTE(review): vcmp3-vcmp41 otherwise all use infra_kvm_node01 — confirm node04 here is intentional and not a typo
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp11:
+              name: ${_param:openstack_vcmp_node11_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp12:
+              name: ${_param:openstack_vcmp_node12_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp13:
+              name: ${_param:openstack_vcmp_node13_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp14:
+              name: ${_param:openstack_vcmp_node14_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp15:
+              name: ${_param:openstack_vcmp_node15_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp16:
+              name: ${_param:openstack_vcmp_node16_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp17:
+              name: ${_param:openstack_vcmp_node17_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp18:
+              name: ${_param:openstack_vcmp_node18_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp19:
+              name: ${_param:openstack_vcmp_node19_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp20:
+              name: ${_param:openstack_vcmp_node20_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp21:
+              name: ${_param:openstack_vcmp_node21_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp22:
+              name: ${_param:openstack_vcmp_node22_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp23:
+              name: ${_param:openstack_vcmp_node23_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp24:
+              name: ${_param:openstack_vcmp_node24_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp25:
+              name: ${_param:openstack_vcmp_node25_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp26:
+              name: ${_param:openstack_vcmp_node26_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp27:
+              name: ${_param:openstack_vcmp_node27_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp28:
+              name: ${_param:openstack_vcmp_node28_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp29:
+              name: ${_param:openstack_vcmp_node29_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp30:
+              name: ${_param:openstack_vcmp_node30_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp31:
+              name: ${_param:openstack_vcmp_node31_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp32:
+              name: ${_param:openstack_vcmp_node32_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp33:
+              name: ${_param:openstack_vcmp_node33_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp34:
+              name: ${_param:openstack_vcmp_node34_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp35:
+              name: ${_param:openstack_vcmp_node35_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp36:
+              name: ${_param:openstack_vcmp_node36_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp37:
+              name: ${_param:openstack_vcmp_node37_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp38:
+              name: ${_param:openstack_vcmp_node38_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp39:
+              name: ${_param:openstack_vcmp_node39_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp40:
+              name: ${_param:openstack_vcmp_node40_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp41:
+              name: ${_param:openstack_vcmp_node41_hostname}
+              provider: ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp42:
+              name: ${_param:openstack_vcmp_node42_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp43:
+              name: ${_param:openstack_vcmp_node43_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp44:
+              name: ${_param:openstack_vcmp_node44_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp45:
+              name: ${_param:openstack_vcmp_node45_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp46:
+              name: ${_param:openstack_vcmp_node46_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp47:
+              name: ${_param:openstack_vcmp_node47_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp48:
+              name: ${_param:openstack_vcmp_node48_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp49:
+              name: ${_param:openstack_vcmp_node49_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp50:
+              name: ${_param:openstack_vcmp_node50_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp51:
+              name: ${_param:openstack_vcmp_node51_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp52:
+              name: ${_param:openstack_vcmp_node52_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp53:
+              name: ${_param:openstack_vcmp_node53_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp54:
+              name: ${_param:openstack_vcmp_node54_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp55:
+              name: ${_param:openstack_vcmp_node55_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp56:
+              name: ${_param:openstack_vcmp_node56_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp57:
+              name: ${_param:openstack_vcmp_node57_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp58:
+              name: ${_param:openstack_vcmp_node58_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp59:
+              name: ${_param:openstack_vcmp_node59_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp60:
+              name: ${_param:openstack_vcmp_node60_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp61:
+              name: ${_param:openstack_vcmp_node61_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp62:
+              name: ${_param:openstack_vcmp_node62_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp63:
+              name: ${_param:openstack_vcmp_node63_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp64:
+              name: ${_param:openstack_vcmp_node64_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp65:
+              name: ${_param:openstack_vcmp_node65_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp66:
+              name: ${_param:openstack_vcmp_node66_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp67:
+              name: ${_param:openstack_vcmp_node67_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp68:
+              name: ${_param:openstack_vcmp_node68_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp69:
+              name: ${_param:openstack_vcmp_node69_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp70:
+              name: ${_param:openstack_vcmp_node70_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp71:
+              name: ${_param:openstack_vcmp_node71_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp72:
+              name: ${_param:openstack_vcmp_node72_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp73:
+              name: ${_param:openstack_vcmp_node73_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp74:
+              name: ${_param:openstack_vcmp_node74_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp75:
+              name: ${_param:openstack_vcmp_node75_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp76:
+              name: ${_param:openstack_vcmp_node76_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp77:
+              name: ${_param:openstack_vcmp_node77_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp78:
+              name: ${_param:openstack_vcmp_node78_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp79:
+              name: ${_param:openstack_vcmp_node79_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp80:
+              name: ${_param:openstack_vcmp_node80_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp81:
+              name: ${_param:openstack_vcmp_node81_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp82:
+              name: ${_param:openstack_vcmp_node82_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp83:
+              name: ${_param:openstack_vcmp_node83_hostname}
+              provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp84:
+              name: ${_param:openstack_vcmp_node84_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp85:
+              name: ${_param:openstack_vcmp_node85_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp86:
+              name: ${_param:openstack_vcmp_node86_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp87:
+              name: ${_param:openstack_vcmp_node87_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp88:
+              name: ${_param:openstack_vcmp_node88_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp89:
+              name: ${_param:openstack_vcmp_node89_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp90:
+              name: ${_param:openstack_vcmp_node90_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp91:
+              name: ${_param:openstack_vcmp_node91_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp92:
+              name: ${_param:openstack_vcmp_node92_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp93:
+              name: ${_param:openstack_vcmp_node93_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp94:
+              name: ${_param:openstack_vcmp_node94_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp95:
+              name: ${_param:openstack_vcmp_node95_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp96:
+              name: ${_param:openstack_vcmp_node96_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp97:
+              name: ${_param:openstack_vcmp_node97_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp98:
+              name: ${_param:openstack_vcmp_node98_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp99:
+              name: ${_param:openstack_vcmp_node99_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp100:
+              name: ${_param:openstack_vcmp_node100_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp101:
+              name: ${_param:openstack_vcmp_node101_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp102:
+              name: ${_param:openstack_vcmp_node102_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp103:
+              name: ${_param:openstack_vcmp_node103_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp104:
+              name: ${_param:openstack_vcmp_node104_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp105:
+              name: ${_param:openstack_vcmp_node105_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp106:
+              name: ${_param:openstack_vcmp_node106_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp107:
+              name: ${_param:openstack_vcmp_node107_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp108:
+              name: ${_param:openstack_vcmp_node108_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp109:
+              name: ${_param:openstack_vcmp_node109_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp110:
+              name: ${_param:openstack_vcmp_node110_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp111:
+              name: ${_param:openstack_vcmp_node111_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp112:
+              name: ${_param:openstack_vcmp_node112_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp113:
+              name: ${_param:openstack_vcmp_node113_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp114:
+              name: ${_param:openstack_vcmp_node114_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp115:
+              name: ${_param:openstack_vcmp_node115_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp116:
+              name: ${_param:openstack_vcmp_node116_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp117:
+              name: ${_param:openstack_vcmp_node117_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp118:
+              name: ${_param:openstack_vcmp_node118_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp119:
+              name: ${_param:openstack_vcmp_node119_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp120:
+              name: ${_param:openstack_vcmp_node120_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp121:
+              name: ${_param:openstack_vcmp_node121_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp122:
+              name: ${_param:openstack_vcmp_node122_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp123:
+              name: ${_param:openstack_vcmp_node123_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp124:
+              name: ${_param:openstack_vcmp_node124_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp125:
+              name: ${_param:openstack_vcmp_node125_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp126:
+              name: ${_param:openstack_vcmp_node126_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp127:
+              name: ${_param:openstack_vcmp_node127_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp128:
+              name: ${_param:openstack_vcmp_node128_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp129:
+              name: ${_param:openstack_vcmp_node129_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp130:
+              name: ${_param:openstack_vcmp_node130_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp131:
+              name: ${_param:openstack_vcmp_node131_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp132:
+              name: ${_param:openstack_vcmp_node132_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp133:
+              name: ${_param:openstack_vcmp_node133_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp134:
+              name: ${_param:openstack_vcmp_node134_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp135:
+              name: ${_param:openstack_vcmp_node135_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp136:
+              name: ${_param:openstack_vcmp_node136_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp137:
+              name: ${_param:openstack_vcmp_node137_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp138:
+              name: ${_param:openstack_vcmp_node138_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp139:
+              name: ${_param:openstack_vcmp_node139_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp140:
+              name: ${_param:openstack_vcmp_node140_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp141:
+              name: ${_param:openstack_vcmp_node141_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp142:
+              name: ${_param:openstack_vcmp_node142_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp143:
+              name: ${_param:openstack_vcmp_node143_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp144:
+              name: ${_param:openstack_vcmp_node144_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp145:
+              name: ${_param:openstack_vcmp_node145_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp146:
+              name: ${_param:openstack_vcmp_node146_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp147:
+              name: ${_param:openstack_vcmp_node147_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp148:
+              name: ${_param:openstack_vcmp_node148_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp149:
+              name: ${_param:openstack_vcmp_node149_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp150:
+              name: ${_param:openstack_vcmp_node150_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp151:
+              name: ${_param:openstack_vcmp_node151_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp152:
+              name: ${_param:openstack_vcmp_node152_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp153:
+              name: ${_param:openstack_vcmp_node153_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp154:
+              name: ${_param:openstack_vcmp_node154_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp155:
+              name: ${_param:openstack_vcmp_node155_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp156:
+              name: ${_param:openstack_vcmp_node156_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp157:
+              name: ${_param:openstack_vcmp_node157_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp158:
+              name: ${_param:openstack_vcmp_node158_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp159:
+              name: ${_param:openstack_vcmp_node159_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp160:
+              name: ${_param:openstack_vcmp_node160_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp161:
+              name: ${_param:openstack_vcmp_node161_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp162:
+              name: ${_param:openstack_vcmp_node162_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp163:
+              name: ${_param:openstack_vcmp_node163_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp164:
+              name: ${_param:openstack_vcmp_node164_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp165:
+              name: ${_param:openstack_vcmp_node165_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp166:
+              name: ${_param:openstack_vcmp_node166_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp167:
+              name: ${_param:openstack_vcmp_node167_hostname}
+              provider: ${_param:infra_kvm_node04_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp168:
+              name: ${_param:openstack_vcmp_node168_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp169:
+              name: ${_param:openstack_vcmp_node169_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp170:
+              name: ${_param:openstack_vcmp_node170_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp171:
+              name: ${_param:openstack_vcmp_node171_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp172:
+              name: ${_param:openstack_vcmp_node172_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp173:
+              name: ${_param:openstack_vcmp_node173_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp174:
+              name: ${_param:openstack_vcmp_node174_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp175:
+              name: ${_param:openstack_vcmp_node175_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp176:
+              name: ${_param:openstack_vcmp_node176_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp177:
+              name: ${_param:openstack_vcmp_node177_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp178:
+              name: ${_param:openstack_vcmp_node178_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp179:
+              name: ${_param:openstack_vcmp_node179_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp180:
+              name: ${_param:openstack_vcmp_node180_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp181:
+              name: ${_param:openstack_vcmp_node181_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp182:
+              name: ${_param:openstack_vcmp_node182_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp183:
+              name: ${_param:openstack_vcmp_node183_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp184:
+              name: ${_param:openstack_vcmp_node184_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp185:
+              name: ${_param:openstack_vcmp_node185_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp186:
+              name: ${_param:openstack_vcmp_node186_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp187:
+              name: ${_param:openstack_vcmp_node187_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp188:
+              name: ${_param:openstack_vcmp_node188_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp189:
+              name: ${_param:openstack_vcmp_node189_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp190:
+              name: ${_param:openstack_vcmp_node190_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp191:
+              name: ${_param:openstack_vcmp_node191_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp192:
+              name: ${_param:openstack_vcmp_node192_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp193:
+              name: ${_param:openstack_vcmp_node193_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp194:
+              name: ${_param:openstack_vcmp_node194_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp195:
+              name: ${_param:openstack_vcmp_node195_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp196:
+              name: ${_param:openstack_vcmp_node196_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp197:
+              name: ${_param:openstack_vcmp_node197_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp198:
+              name: ${_param:openstack_vcmp_node198_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp199:
+              name: ${_param:openstack_vcmp_node199_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp200:
+              name: ${_param:openstack_vcmp_node200_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp201:
+              name: ${_param:openstack_vcmp_node201_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp202:
+              name: ${_param:openstack_vcmp_node202_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp203:
+              name: ${_param:openstack_vcmp_node203_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp204:
+              name: ${_param:openstack_vcmp_node204_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp205:
+              name: ${_param:openstack_vcmp_node205_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp206:
+              name: ${_param:openstack_vcmp_node206_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp207:
+              name: ${_param:openstack_vcmp_node207_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp208:
+              name: ${_param:openstack_vcmp_node208_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp209:
+              name: ${_param:openstack_vcmp_node209_hostname}
+              provider: ${_param:infra_kvm_node05_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp210:
+              name: ${_param:openstack_vcmp_node210_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp211:
+              name: ${_param:openstack_vcmp_node211_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp212:
+              name: ${_param:openstack_vcmp_node212_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp213:
+              name: ${_param:openstack_vcmp_node213_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp214:
+              name: ${_param:openstack_vcmp_node214_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp215:
+              name: ${_param:openstack_vcmp_node215_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp216:
+              name: ${_param:openstack_vcmp_node216_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp217:
+              name: ${_param:openstack_vcmp_node217_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp218:
+              name: ${_param:openstack_vcmp_node218_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp219:
+              name: ${_param:openstack_vcmp_node219_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp220:
+              name: ${_param:openstack_vcmp_node220_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp221:
+              name: ${_param:openstack_vcmp_node221_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp222:
+              name: ${_param:openstack_vcmp_node222_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp223:
+              name: ${_param:openstack_vcmp_node223_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp224:
+              name: ${_param:openstack_vcmp_node224_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp225:
+              name: ${_param:openstack_vcmp_node225_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp226:
+              name: ${_param:openstack_vcmp_node226_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp227:
+              name: ${_param:openstack_vcmp_node227_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp228:
+              name: ${_param:openstack_vcmp_node228_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp229:
+              name: ${_param:openstack_vcmp_node229_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp230:
+              name: ${_param:openstack_vcmp_node230_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp231:
+              name: ${_param:openstack_vcmp_node231_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp232:
+              name: ${_param:openstack_vcmp_node232_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp233:
+              name: ${_param:openstack_vcmp_node233_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp234:
+              name: ${_param:openstack_vcmp_node234_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp235:
+              name: ${_param:openstack_vcmp_node235_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp236:
+              name: ${_param:openstack_vcmp_node236_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp237:
+              name: ${_param:openstack_vcmp_node237_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp238:
+              name: ${_param:openstack_vcmp_node238_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp239:
+              name: ${_param:openstack_vcmp_node239_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp240:
+              name: ${_param:openstack_vcmp_node240_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp241:
+              name: ${_param:openstack_vcmp_node241_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp242:
+              name: ${_param:openstack_vcmp_node242_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp243:
+              name: ${_param:openstack_vcmp_node243_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp244:
+              name: ${_param:openstack_vcmp_node244_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp245:
+              name: ${_param:openstack_vcmp_node245_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp246:
+              name: ${_param:openstack_vcmp_node246_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp247:
+              name: ${_param:openstack_vcmp_node247_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp248:
+              name: ${_param:openstack_vcmp_node248_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp249:
+              name: ${_param:openstack_vcmp_node249_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp250:
+              name: ${_param:openstack_vcmp_node250_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp251:
+              name: ${_param:openstack_vcmp_node251_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp252:
+              name: ${_param:openstack_vcmp_node252_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp253:
+              name: ${_param:openstack_vcmp_node253_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
+            vcmp254:
+              name: ${_param:openstack_vcmp_node254_hostname}
+              provider: ${_param:infra_kvm_node06_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              backend: ${_param:openstack_database_backend_image}
+              size: openstack.database
+              cloud_init: ${_param:salt_control_cluster_node_cloud_init_openstack_database}
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
new file mode 100644
index 0000000..a0d2ef6
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -0,0 +1,876 @@
+default_context:
+  ironic_enabled: True
+  openstack_baremetal_hostname: bmt
+  openstack_baremetal_address_baremetal: 10.14.0.10
+  openstack_baremetal_node01_baremetal_address: 10.14.0.11
+  openstack_baremetal_node02_baremetal_address: 10.14.0.12
+  openstack_baremetal_node03_baremetal_address: 10.14.0.13
+  openstack_baremetal_node01_address: 10.167.11.21
+  openstack_baremetal_node02_address: 10.167.11.22
+  openstack_baremetal_node03_address: 10.167.11.23
+  openstack_baremetal_neutron_subnet_cidr: 10.14.0.0/24
+  openstack_baremetal_neutron_subnet_allocation_start: 10.14.0.100
+  openstack_baremetal_neutron_subnet_allocation_end: 10.14.0.200
+  openstack_baremetal_address: 10.167.11.20
+  openstack_baremetal_interface: ens7
+  openstack_baremetal_vip_interface: br_baremetal
+  jenkins_cfg_admin_password: r00tme
+  bmk_enabled: 'False'
+  cicd_control_node01_address: 10.167.11.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.11.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.11.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.11.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cluster_domain: released-bm-b300-cicd-queens-ovs-maas.local
+  cluster_name: released-bm-b300-cicd-queens-ovs-maas
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+  control_network_netmask: 255.255.254.0
+  control_network_subnet: 10.167.11.0/23
+  control_vlan: '2404'
+
+  jenkins_pipelines_branch: 'release/2019.2.0'
+  deploy_network_gateway: 172.16.180.1
+  deploy_network_netmask: 255.255.254.0
+  deploy_network_subnet: 172.16.180.0/23
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.11.241
+  infra_kvm01_deploy_address: 172.16.180.3
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.11.242
+  infra_kvm02_deploy_address: 172.16.180.4
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.11.243
+  infra_kvm03_deploy_address: 172.16.180.5
+  infra_kvm03_hostname: kvm03
+  infra_kvm04_control_address: 10.167.11.244
+  infra_kvm04_deploy_address: 172.16.180.6
+  infra_kvm04_hostname: kvm04
+  infra_kvm05_control_address: 10.167.11.245
+  infra_kvm05_deploy_address: 172.16.180.7
+  infra_kvm05_hostname: kvm05
+  infra_kvm06_control_address: 10.167.11.246
+  infra_kvm06_deploy_address: 172.16.180.8
+  infra_kvm06_hostname: kvm06
+  infra_kvm_vip_address: 10.167.11.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_enabled: 'True'
+  maas_deploy_address: 172.16.180.2
+  maas_deploy_cidr: 172.16.180.0/23
+  maas_deploy_gateway: 172.16.180.1
+  maas_deploy_range_end: 172.16.181.250
+  maas_deploy_range_start: 172.16.180.18
+  maas_dhcp_enabled: 'True'
+  maas_fabric_name: fabric-0
+  maas_hostname: cfg01
+  maas_manage_deploy_network: 'True'
+  maas_machines: |
+        kvm01: # #cz7625-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          # pxe_interface_mac:
+          pxe_interface_mac: "0c:c4:7a:33:24:be"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:33:24:be"
+              mode: "static"
+              ip: "172.16.180.3"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:33:24:bf"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:01:3e"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:01:3f"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:f3:ce"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:f3:cf"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.227"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm02: # #cz7627-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:33:2d:6a"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:33:2d:6a"
+              mode: "static"
+              ip: "172.16.180.4"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:33:2d:6b"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:43:b8"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:43:b9"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:96:02"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:96:03"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.229"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm03: # #cz7756-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:69:a0:4c"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:69:a0:4c"
+              mode: "static"
+              ip: "172.16.180.5"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:69:a0:4d"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c0:c2:14"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c0:c2:15"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:09:c2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:09:c3"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.88"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm04: # #cz7792-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          # pxe_interface_mac:
+          pxe_interface_mac: "0c:c4:7a:6c:83:5c"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:83:5c"
+              mode: "static"
+              ip: "172.16.180.6"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:83:5d"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:7d:98"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:7d:99"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:de"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:df"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.112"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm05: # #cz7876-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:6c:88:d6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:88:d6"
+              mode: "static"
+              ip: "172.16.180.7"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:88:d7"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:74"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:75"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:89:be"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:89:bf"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.208"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm06: # #cz8073-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:df:ac"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:df:ac"
+              mode: "static"
+              ip: "172.16.180.8"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:df:ad"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:3a:f2"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:3a:f3"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:a6:4c"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:a6:4d"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.227.118"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw01: # #cz9039-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:84"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:84"
+              mode: "static"
+              ip: "172.16.180.9"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:85"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:41:b0"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:41:b1"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:90:d2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:90:d3"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.229.28"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw02: # #cz9048-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:82"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:82"
+              mode: "static"
+              ip: "172.16.180.10"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:83"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:00:7c"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:00:7d"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:bc:88:8a"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:bc:88:8b"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.23"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw03: # #cz8159-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:6c:bc:f6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:bc:f6"
+              mode: "static"
+              ip: "172.16.180.11"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:bc:f7"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:9b:cc:32"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:9b:cc:33"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c1:a5:04"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c1:a5:05"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.9"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd001: # #cz9040-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:c9:02"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:c9:02"
+              mode: "static"
+              ip: "172.16.180.12"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:c9:03"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:aa:90"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:aa:91"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:0a:a4"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:0a:a5"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.246"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd002: # #cz9041-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:60"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:60"
+              mode: "static"
+              ip: "172.16.180.13"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:61"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:04:2c"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:04:2d"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:01:f2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:01:f3"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.243"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd003: # #cz9042-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:c9:3a"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:c9:3a"
+              mode: "static"
+              ip: "172.16.180.14"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:c9:3b"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:33:d7:10"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:33:d7:11"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:0b:5f:50"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:0b:5f:51"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.244"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp001: # #cz9039-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d6:aa"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d6:aa"
+              mode: "static"
+              ip: "172.16.180.15"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d6:ab"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:86:76"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:86:77"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:39:3c"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:39:3d"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.248"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp002: # #cz9046-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:ce:30"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:ce:30"
+              mode: "static"
+              ip: "172.16.180.16"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:ce:31"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:e0:7d:e0"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:e0:7d:e1"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:0c:0e"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:0c:0f"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.222"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp003: # #cz8061-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:e0:ce"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:e0:ce"
+              mode: "static"
+              ip: "172.16.180.17"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:e0:cf"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:94:5e"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:94:5f"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:87:e4"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:87:e5"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.228"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+  mcp_version: proposed
+  mcp_docker_registry: docker-prod-local.docker.mirantis.net
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: released-bm-b300-cicd-queens-ovs-maas.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openstack_benchmark_node01_address: 10.167.11.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: golden
+  openstack_compute_count: '3'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
+  openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
+  openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
+  openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
+  openstack_control_address: 10.167.11.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.11.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.11.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.11.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.11.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.11.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.11.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.11.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_deploy_address: 172.16.180.9
+  openstack_gateway_node02_deploy_address: 172.16.180.10
+  openstack_gateway_node03_deploy_address: 172.16.180.11
+  openstack_gateway_node01_address: 10.167.11.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node01_tenant_address: 10.167.12.9
+  openstack_gateway_node02_address: 10.167.11.225
+  openstack_gateway_node02_tenant_address: 10.167.12.10
+  openstack_gateway_node03_address: 10.167.11.226
+  openstack_gateway_node03_tenant_address: 10.167.12.11
+  openstack_message_queue_address: 10.167.11.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.11.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.11.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.11.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'True'
+  openstack_neutron_vlan_aware_vms: 'True'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_hugepages_count: '16'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nfv_sriov_network: physnet2
+  openstack_nfv_sriov_numvfs: '7'
+  openstack_nfv_sriov_pf_nic: enp5s0f1
+  openstack_nova_cpu_pinning: 6,7,8,9,10,11
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.11.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.11.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.11.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.11.19
+  openstack_version: queens
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
+  salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
+  salt_master_address: 10.167.11.5
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.180.2
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.11.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.11.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.11.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.11.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.167.11.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.11.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.11.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.11.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.11.96
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.11.97
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.11.98
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.11.99
+  stacklight_telemetry_node03_hostname: mtr03
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.12.1
+  tenant_network_netmask: 255.255.254.0
+  tenant_network_subnet: 10.167.12.0/23
+  tenant_vlan: '2406'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  ceph_enabled: 'True'
+  ceph_version: "nautilus"
+  ceph_hyper_converged: "False"
+  rbd_monitoring_enabled: 'True'
+  rbd_pool_stats_gnocchi: 'True'
+  rbd_pool_stats_volumes: 'True'
+  rbd_pool_stats_images: 'True'
+  rbd_pool_stats_backups: 'True'
+  rbd_pool_stats_vms: 'True'
+  # Apply settings for ceph from contexts/ceph/nautilus-collocated-block-db.yml
+  ceph_osd_backend: "bluestore"
+  ceph_osds_per_device: '1'
+  ceph_osd_data_size: ''
+  ceph_osd_dmcrypt: False
+  ceph_osd_count: "3"
+  ceph_osd_node_count: 3
+  ceph_osd_block_db_size: 20
+  ceph_osd_journal_size: 20
+  ceph_osd_bond_mode: "active-backup"
+  ceph_osd_data_partition_prefix: ""
+  ceph_public_network_allocation: storage
+  ceph_cluster_network: "10.167.11.0/24"
+  ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
+  ceph_osd_deploy_address_ranges: "172.16.180.12-172.16.180.14"
+  ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
+  ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
+
+  ceph_public_network: 10.167.11.0/24
+
+  ceph_osd_data_disks: "/dev/sdb"
+  ceph_osd_journal_or_block_db_disks: ""
+  ceph_osd_block_partition_prefix: ''
+  ceph_osd_mode: "separated"
+  ceph_osd_deploy_nic: "eth0"
+  ceph_osd_primary_first_nic: "eth1"
+  ceph_osd_primary_second_nic: "eth2"
+  ceph_mon_node01_address: "10.167.11.66"
+  ceph_mon_node01_hostname: "cmn01"
+  ceph_mon_node02_address: "10.167.11.67"
+  ceph_mon_node02_hostname: "cmn02"
+  ceph_mon_node03_address: "10.167.11.68"
+  ceph_mon_node03_hostname: "cmn03"
+  ceph_rgw_address: "10.167.11.75"
+  ceph_rgw_node01_address: "10.167.11.76"
+  ceph_rgw_node01_hostname: "rgw01"
+  ceph_rgw_node02_address: "10.167.11.77"
+  ceph_rgw_node02_hostname: "rgw02"
+  ceph_rgw_node03_address: "10.167.11.78"
+  ceph_rgw_node03_hostname: "rgw03"
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: True
+  designate_backend: powerdns
+  designate_enabled: 'True'
+  openstack_dns_node01_address: 10.167.11.113
+  openstack_dns_node02_address: 10.167.11.114
+  nova_vnc_tls_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_rabbitmq_standalone_mode: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'True'
+  gnocchi_aggregation_storage: ceph
+  openstack_telemetry_address: 10.167.11.83
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 10.167.11.84
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 10.167.11.85
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 10.167.11.86
+  openstack_telemetry_node03_hostname: mdb03
+  barbican_backend: dogtag
+  barbican_enabled: 'True'
+  barbican_integration_enabled: 'False'
+  openstack_barbican_address: 10.167.11.44
+  openstack_barbican_hostname: kmn
+  openstack_barbican_node01_address: 10.167.11.45
+  openstack_barbican_node01_hostname: kmn01
+  openstack_barbican_node02_address: 10.167.11.46
+  openstack_barbican_node02_hostname: kmn02
+  openstack_barbican_node03_address: 10.167.11.47
+  openstack_barbican_node03_hostname: kmn03
+  openstack_create_public_network: 'True'
+  openstack_public_neutron_subnet_gateway: 172.17.42.1
+  openstack_public_neutron_subnet_cidr: 172.17.42.0/26
+  openstack_public_neutron_subnet_allocation_start: 172.17.42.20
+  openstack_public_neutron_subnet_allocation_end: 172.17.42.55
+  backend_vlan: '2402'
+  manila_enabled: 'False'
+  openscap_enabled: 'True'
+  octavia_health_manager_node01_address: 192.168.1.10
+  octavia_health_manager_node02_address: 192.168.1.11
+  octavia_health_manager_node03_address: 192.168.1.12
+  octavia_manager_cluster: 'True'
+  octavia_amphora_topology: 'ACTIVE_STANDBY'
+  octavia_spare_amphora_pool_size: 1
+  octavia_lb_mgmt_cidr: 192.168.1.0/24
+  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+  openstack_octavia_enabled: 'True'
+  secrets_encryption_enabled: 'True'
+  secrets_encryption_key_id: 'F5CB2ADC36159B03'
+  # Used on CI only.
+  secrets_encryption_private_key: ''
+  cinder_backup_engine: 'ceph'
+  cinder_ceph_backup_pool_name: 'backups'
+  openstack_keystone_security:
+    disable_user_account_days_inactive: 7
+    lockout_failure_attempts: 60
+    lockout_duration: 600
+    password_expires_days: 730
+    unique_last_password_count: 5
+    minimum_password_age: 0
+    password_regex: "'^[a-zA-Z0-9~!@#%^&\\*_=+]{32,}$$'"
+    password_regex_description: "Your password can contain capital letters, lowercase letters, digits, symbols '~ ! @ # % ^ & * _ = +' and must have a minimum length of 32 characters"
+    change_password_upon_first_use: False
+  stacklight_ssl_enabled: 'True'
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..773f065
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml
@@ -0,0 +1,200 @@
+nodes:
+    cfg01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      - features_runtest
+      interfaces:
+        ens3:
+          role: single_static_mgm
+        ens4:
+          role: single_static_ctl
+
+    # Physical nodes
+    kvm01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm04.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node04
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm05.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node05
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm06.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node06
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    osd<<count>>:
+      reclass_storage_name: ceph_osd_rack01
+      roles:
+      - ceph_osd
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+#          role: bond0_ab_vlan_ceph_storage_backend
+
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_prv_lacp
+
+    gtw01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_baremetal_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_baremetal_lacp
+        sten2:
+          role: bond_prv_lacp
+
+    gtw02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_gateway_node02
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_baremetal_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_baremetal_lacp
+        sten2:
+          role: bond_prv_lacp
+
+    gtw03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_gateway_node03
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_baremetal_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_baremetal_lacp
+        sten2:
+          role: bond_prv_lacp
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
new file mode 100644
index 0000000..2bc74b7
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
@@ -0,0 +1,448 @@
+nodes:
+    ctl01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      - features_ironic_baremetal_nodes
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    ctl02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    ctl03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    prx01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    prx02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mon01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mon02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mon03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mtr01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mtr02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mtr03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    log01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    log02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    log03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cmn01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_mon_node01
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cmn02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_mon_node02
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cmn03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_mon_node03
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    rgw01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_rgw_node01
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    rgw02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_rgw_node02
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    rgw03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_rgw_node03
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mdb01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_telemetry_node01
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mdb02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_telemetry_node02
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mdb03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_telemetry_node03
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dns01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_dns_node01
+      roles:
+      - openstack_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dns02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_dns_node02
+      roles:
+      - openstack_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    kmn01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_barbican_node01
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    kmn02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_barbican_node02
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    kmn03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_barbican_node03
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    bmt01.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_baremetal_node01
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+        ens4:
+          role: single_baremetal
+
+    bmt02.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_baremetal_node02
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+        ens4:
+          role: single_baremetal
+
+    bmt03.released-bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_baremetal_node03
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+        ens4:
+          role: single_baremetal
+
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt.yaml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt.yaml
new file mode 100644
index 0000000..890e742
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/salt.yaml
@@ -0,0 +1,85 @@
+{% set HOSTNAME_CFG01='cfg01.released-bm-b300-cicd-queens-ovs-maas.local' %}
+{% set LAB_CONFIG_NAME='released-bm-b300-cicd-queens-ovs-maas' %}
+{% set DOMAIN_NAME='released-bm-b300-cicd-queens-ovs-maas.local' %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
+{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
+
+- description: Wait for salt-master is ready after configdrive user-data
+  cmd: |
+    timeout 120 salt-call test.ping
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public key for machines in MAAS
+  cmd: |
+    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run commissioning of BM nodes
+  cmd: |
+    salt-call maas.process_machines
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait for machines ready
+  cmd: |
+    salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 7, delay: 5}
+  skip_fail: false
+
+- description: Enforce the interfaces configuration defined in the model for servers
+  cmd: |
+    salt-call state.sls maas.machines.assign_ip;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Assign drive partitions to machines
+  cmd: salt-call state.sls maas.machines.storage
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+  cmd: |
+    salt-key -y -D;
+    salt-call test.ping
+    sleep 5
+    # Check that the cfg01 is registered
+    salt-key | grep cfg01
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: provision the automatically commissioned physical nodes through MAAS
+  cmd: |
+    salt-call maas.deploy_machines;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Wait for machines deployed
+  cmd: |
+    salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 6, delay: 5}
+  skip_fail: false
+
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/tempest_skip.list b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/tempest_skip.list
new file mode 100644
index 0000000..c9c567a
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/tempest_skip.list
@@ -0,0 +1,30 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (duplicate entry removed: test_server_signal_userdata_format_software_config is already listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-33000 [OC][Infra] Instances don't have access to external net
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server\b
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_volume_shelved_or_offload_server\b
+# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume\b
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..2ebdf1f
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
@@ -0,0 +1,82 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+  - name: root
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+  - name: jenkins
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+    ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+  list: |
+    root:r00tme
+    jenkins:qalab
+  expire: False
+
+packages:
+  - openjdk-8-jre-headless
+  - libyaml-dev
+  - libffi-dev
+  - libvirt-dev
+  - python-dev
+  - python-pip
+  - python-virtualenv
+  #- python-psycopg2
+  - pkg-config
+  - vlan
+  - bridge-utils
+  - ebtables
+
+bootcmd:
+  # Enable root access
+  - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+  - service sshd restart
+output:
+  all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+  # Create swap
+  - fallocate -l 16G /swapfile
+  - chmod 600 /swapfile
+  - mkswap /swapfile
+  - swapon /swapfile
+  - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+  - find /etc/network/interfaces.d/ -type f -delete
+  - kill $(pidof /sbin/dhclient) || /bin/true
+  - ip a flush dev ens3
+  - ip a flush dev ens4
+  - rm -f /var/run/network/ifstate.ens3
+  - rm -f /var/run/network/ifstate.ens4
+  - ip route delete default || /bin/true
+  - ifup ens3
+  - ifup ens4
+
+write_files:
+  - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+    content: |
+      GRUB_RECORDFAIL_TIMEOUT=30
+      GRUB_TIMEOUT=3
+      GRUB_TIMEOUT_STYLE=menu
+
+  - path: /etc/network/interfaces
+    content: |
+      auto ens3
+      iface ens3 inet static
+      address $management_static_ip
+      netmask 255.255.254.0
+      gateway $management_gw
+      dns-nameservers $dnsaddress
+
+      auto ens4
+      iface ens4 inet static
+      address $control_static_ip
+      netmask 255.255.254.0
+
+  - path: /etc/bash_completion.d/fuel_devops30_activate
+    content: |
+      source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml
new file mode 100644
index 0000000..bb6338c
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml
@@ -0,0 +1,81 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+
+disable_root: false
+chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+   - if lvs vg0; then pvresize $(pvdisplay -C -S vg_name=vg0 -o pv_name --noheadings | tail -n1); fi
+   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo resolvconf -u
+   #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /usr/share/growlvm/image-layout.yml
+     content: |
+       root:
+         size: '65%VG'
+       home:
+         size: '1%VG'
+       var_log:
+         size: '10%VG'
+       var_log_audit:
+         size: '5%VG'
+       var_tmp:
+         size: '10%VG'
+       tmp:
+         size: '5%VG'
+     owner: root:root
+
+growpart:
+    mode: auto
+    devices:
+      - '/'
+      - '/dev/vda3'
+      - '/dev/vdb3'
+      - '/dev/vdc3'
+      - '/dev/vdd3'
+    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay.hot
new file mode 100644
index 0000000..f2ce3f0
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/underlay.hot
@@ -0,0 +1,143 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for released-bm-b300-cicd-queens-ovs-maas
+
+parameters:
+  instance_domain:
+    type: string
+    default: released-bm-b300-cicd-queens-ovs-maas.local
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+  key_pair:
+    type: string
+  cfg_flavor:
+    type: string
+  foundation_image:
+    type: string
+  foundation_flavor:
+    type: string
+  bm_availability_zone:
+    type: string
+  control_subnet_cidr:
+    type: string
+    default: "10.167.11.0/23"
+  tenant_subnet_cidr:
+    type: string
+    default: "10.167.12.0/23"
+  external_subnet_cidr:
+    type: string
+    default: "172.17.42.0/26"
+  management_subnet_cidr:
+    type: string
+    default: "172.16.180.0/23"
+  management_subnet_cfg01_ip:
+    type: string
+    default: 172.16.180.2
+  management_subnet_gateway_ip:
+    type: string
+    default: 172.16.180.1
+  management_subnet_pool_start:
+    type: string
+    default: 172.16.180.3
+  management_subnet_pool_end:
+    type: string
+    default: 172.16.180.61
+  salt_master_control_ip:
+    type: string
+    default: 10.167.11.5
+  deploy_empty_node:
+    type: boolean
+    default: False
+
+resources:
+  subnets:
+    type: MCP::Subnets
+    properties:
+      stack_name: { get_param: "OS::stack_name" }
+      env_name: { get_param: env_name }
+      management_net: 'system-phys-2401'
+      control_net: 'system-phys-2404'
+      tenant_net: 'system-phys-2406'
+      external_net: 'system-phys-2403'
+      control_subnet_cidr: { get_param: control_subnet_cidr }
+      tenant_subnet_cidr: { get_param: tenant_subnet_cidr }
+      external_subnet_cidr: { get_param: external_subnet_cidr }
+      management_subnet_cidr: { get_param: management_subnet_cidr }
+      management_subnet_gateway_ip: { get_param: management_subnet_gateway_ip }
+      management_subnet_pool_start: { get_param: management_subnet_pool_start }
+      management_subnet_pool_end: { get_param: management_subnet_pool_end }
+
+  #flavors:
+  #  type: MCP::Flavors
+
+  cfg01_node:
+    type: MCP::MasterNode
+    depends_on: [subnets]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      cfg01_flavor: re.jenkins.slave.large
+      availability_zone: { get_param: bm_availability_zone }
+      management_net: 'system-phys-2401'
+      control_net: 'system-phys-2404'
+      tenant_net: 'system-phys-2406'
+      external_net: 'system-phys-2403'
+      salt_master_control_ip: { get_param: salt_master_control_ip }
+      management_subnet_cfg01_ip: { get_param: management_subnet_cfg01_ip }
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '5' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '5' ]
+      instance_name: cfg01
+      instance_domain: {get_param: instance_domain}
+
+  foundation_node:
+    type: MCP::FoundationNode
+    depends_on: [subnets]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: foundation
+      availability_zone: { get_param: bm_availability_zone }
+      management_net: 'system-phys-2401'
+      control_net: 'system-phys-2404'
+      tenant_net: 'system-phys-2406'
+      external_net: 'system-phys-2403'
+      management_subnet_gateway_ip: { get_param: management_subnet_gateway_ip }
+      instance_image: { get_param: foundation_image }
+      instance_flavor: {get_param: foundation_flavor}
+      underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+      management_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, management_net_prefix] }, '251' ]
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '6' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '6' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '6' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+outputs:
+  foundation_public_ip:
+    description: foundation node IP address (management)
+    value:
+      get_attr:
+      - foundation_node
+      - instance_address
+...
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/vcmp.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/vcmp.yml
new file mode 100644
index 0000000..2e0847a
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/vcmp.yml
@@ -0,0 +1,56 @@
+classes:
+- system.nova.compute.cluster
+- system.nova.compute.notification.messagingv2
+- system.ceilometer.agent.telemetry.cluster
+- system.ceilometer.agent.polling.default
+- system.nova.compute.libvirt.ssl
+- system.nova.compute.libvirt.ssl.vnc
+- system.prometheus.exporters.libvirt
+- system.neutron.compute.cluster
+- system.nova.compute.storage.ceph
+- system.ceph.common.cluster
+- cluster.released-bm-b300-cicd-queens-ovs-maas.ceph.common
+- cluster.released-bm-b300-cicd-queens-ovs-maas.ceph.keyrings.cinder
+- cluster.released-bm-b300-cicd-queens-ovs-maas.ceph.keyrings.nova
+- system.linux.network.dynamic_hosts
+- cluster.released-bm-b300-cicd-queens-ovs-maas.infra
+###
+- cluster.released-bm-b300-cicd-queens-ovs-maas.openstack.networking.vcmpcompute
+parameters:
+  _param:
+    cluster_vip_address: ${_param:openstack_control_address}
+    cluster_local_address: ${_param:single_address}
+    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
+    cluster_node01_address: ${_param:openstack_control_node01_address}
+    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
+    cluster_node02_address: ${_param:openstack_control_node02_address}
+    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
+    cluster_node03_address: ${_param:openstack_control_node03_address}
+    nova_vncproxy_url: https://${_param:cluster_public_host}:6080
+    galera_ssl_enabled: true
+    openstack_mysql_x509_enabled: True
+    rabbitmq_ssl_enabled: True
+    openstack_rabbitmq_port: 5671
+    openstack_rabbitmq_x509_enabled: True
+    # duplicate key removed: openstack_rabbitmq_port is already set above
+    primary_first_nic: eth1
+    primary_second_nic: eth2
+    ceilometer_agent_ssl_enabled: True
+    cluster_internal_protocol: https
+  nova:
+    compute:
+      reserved_host_memory_mb: 1100
+      workaround:
+        disable_libvirt_livesnapshot: True
+# Set rp_filter to Loose mode to allow requests from VM`s floating address to exported Manila share in tenant network
+# For Ironic deployments it allows Ironic provisioned nodes to reach iscsi target in boot from volume cases
+  linux:
+    system:
+      kernel:
+        sysctl:
+          net.ipv4.conf.all.rp_filter: 2
+## Best practice configure high speed interface for live migrations
+#  nova:
+#    libvirt:
+#      migration_inbound_addr: ${_param:single_address}
+
diff --git a/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/vcmpcompute.yml b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/vcmpcompute.yml
new file mode 100644
index 0000000..34d58c7
--- /dev/null
+++ b/tcp_tests/templates/released-bm-b300-cicd-queens-ovs-maas/vcmpcompute.yml
@@ -0,0 +1,64 @@
+parameters:
+  _param:
+    management_interface: ens2
+    primary_interface: ens3
+    deploy_interface: ens4
+    tenant_interface: ens5
+    external_interface: ens6
+  linux:
+    network:
+      bridge: openvswitch
+      interface:
+        management_int:
+          enabled: true
+          name: ${_param:management_interface}
+          proto: dhcp
+          type: eth
+        deploy_int:
+          enabled: true
+          name: ${_param:deploy_interface}
+          proto: dhcp
+          type: eth
+        primary_int:
+          enabled: true
+          name: ${_param:primary_interface}
+          proto: manual
+          type: eth
+          ipflush_onchange: true
+          restart_on_ipflush: true
+        tenant_int:
+          enabled: true
+          name: ${_param:tenant_interface}
+          proto: manual
+          type: eth
+          ipflush_onchange: true
+          restart_on_ipflush: true
+        external_int:
+          enabled: true
+          name: ${_param:external_interface}
+          proto: manual
+          ovs_port_type: OVSPort
+          ovs_bridge: br-floating
+          type: ovs_port
+          bridge: br-floating
+          ipflush_onchange: true
+          restart_on_ipflush: true
+        br-ctl:
+          enabled: true
+          type: bridge
+          proto: static
+          address: ${_param:single_address}
+          netmask: ${_param:control_network_netmask}
+          use_interfaces:
+          - ${_param:primary_interface}
+        br-mesh:
+          enabled: true
+          type: bridge
+          proto: static
+          address: ${_param:tenant_address}
+          netmask: ${_param:tenant_network_netmask}
+          use_interfaces:
+          - ${_param:tenant_interface}
+        br-floating:
+          enabled: true
+          type: ovs_bridge
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
 
 runcmd:
   # Create swap
-  - fallocate -l 16G /swapfile
+  - fallocate -l 2G /swapfile
   - chmod 600 /swapfile
   - mkswap /swapfile
   - swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
index 8fc50af..726ee09 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
@@ -127,7 +127,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -932,7 +932,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
 
 runcmd:
   # Create swap
-  - fallocate -l 16G /swapfile
+  - fallocate -l 2G /swapfile
   - chmod 600 /swapfile
   - mkswap /swapfile
   - swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
index 95fc69e..07f30aa 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
index 2a89da1..9d9d435 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
@@ -66,4 +66,29 @@
 tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
 
+# PROD-25940 for contrail only
+tempest.api.network.admin.test_quotas.QuotasTest.test_quotas\b
+
+# PROD-33719 for contrail only
+tempest.api.network.admin.test_routers.RoutersAdminTest.test_update_router_set_gateway
+tempest.api.network.admin.test_routers.RoutersIpV6AdminTest.test_update_router_set_gateway
+
+# PROD-25128 [OC 4.x][Tempest] Parameter "strict_compliance" is False by default
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable
+
+# PROD-21671 [OpenContrail 4.0] Unable to update "subnet-id" for port (test_update_port_with_security_group_and_extra_attributes)
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+
+# PROD-31179 Several tempest tests are failed on contrail configuration on checks for floating ip connectivity
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state
+
+# PROD-25586 [OC4.x][Tempest] Heat can't update port's mac address
+heat_tempest_plugin.tests.functional.test_create_update_neutron_port.UpdatePortTest.test_update_with_mac_address
+
 
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
index 1677dcd..b1ef08b 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay--user-data-foundation.yaml
@@ -41,7 +41,7 @@
 
 runcmd:
   # Create swap
-  - fallocate -l 16G /swapfile
+  - fallocate -l 2G /swapfile
   - chmod 600 /swapfile
   - mkswap /swapfile
   - swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
index d9da3dd..4c12277 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
@@ -127,7 +127,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -932,7 +932,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml
index bb6338c..d998d47 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay-userdata.yaml
@@ -36,7 +36,7 @@
    #- sudo route add default gw {gateway} {interface_name}
 
    # Create swap
-   - fallocate -l 16G /swapfile
+   - fallocate -l 2G /swapfile
    - chmod 600 /swapfile
    - mkswap /swapfile
    - swapon /swapfile
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
index 88997d7..a82aaf0 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
@@ -130,7 +130,7 @@
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
       cfg01_flavor: { get_param: cfg_flavor }
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
@@ -678,7 +678,7 @@
       instance_name: foundation
       instance_image: { get_param: foundation_image }
       instance_flavor: {get_param: foundation_flavor}
-      availability_zone: { get_param: bm_availability_zone }
+      availability_zone: { get_param: vm_availability_zone }
       management_net: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
       control_net: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
       tenant_net: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 5621b02..200ae8d 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -454,6 +454,10 @@
     chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
     eval $(ssh-agent)
     ssh-add /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+
+    git config --global user.name {{ HOSTNAME_CFG01 }}
+    git config --global user.email {{ HOSTNAME_CFG01 }}@example.com
+
     export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
 
     {%- if COOKIECUTTER_REF_CHANGE != '' %}
@@ -544,13 +548,8 @@
   cmd: |
     set -e;
     set -x;
-    {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
-    pushd /srv/salt/reclass/classes/system/ && \
-    {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
-    git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
-    {%- endfor %}
-    popd;
-    {%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
+
+    {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
     pushd /srv/salt/reclass/classes/system/
     git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
     popd;
@@ -561,6 +560,14 @@
     popd;
     {%- endif %}
 
+    {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
+    pushd /srv/salt/reclass/classes/system/ && \
+    {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
+    git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
+    {%- endfor %}
+    popd;
+    {%- endif %}
+
     {%- if IS_CONTRAIL_LAB %}
     export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
     # vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
diff --git a/tcp_tests/templates/shared-workarounds.yaml b/tcp_tests/templates/shared-workarounds.yaml
index 5e508fd..8f2d67b 100644
--- a/tcp_tests/templates/shared-workarounds.yaml
+++ b/tcp_tests/templates/shared-workarounds.yaml
@@ -29,4 +29,19 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
   skip_fail: true
+{%- endmacro %}
+
+
+{%- macro DELETE_BOND0() %}
+{######################################}
+- description: |
+    Delete empty bond0 interface on kvm and osd nodes. Related-Prod:PROD-35758
+
+  cmd: |
+    set -x;
+    salt -C 'osd* or kvm*' cmd.run 'ip link delete bond0'
+    salt -C 'osd* or kvm*' file.write /etc/modprobe.d/bonding.conf "options bonding max_bonds=0"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
 {%- endmacro %}
\ No newline at end of file
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 2ea36cf..0dabd4d 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -83,8 +83,7 @@
 
 
 def pytest_addoption(parser):
-    parser.addoption("--dont-switch-to-proposed",
-                     action="store_true",
-                     help="Skips switching Jenkins on cluster-under-test to "
-                          "the proposed branch before the applying "
-                          "the MCP updates")
+    parser.addoption("--update-to-tag",
+                     action="store",
+                     default=None,
+                     help="For mcp_update test")
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index 275f00c..47aff19 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -230,6 +230,17 @@
                 "classes", self.MAAS_YAML):
             reclass_actions.add_class(restore_class, self.MAAS_YAML)
 
+        backup_server = salt_actions.local(
+            "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+        backup_server_ip = salt_actions.get_grains(
+            tgt='I@backupninja:server', grains='fqdn_ip4')[0][backup_server][0]
+
+        if 'backupninja_backup_host' not in reclass_actions.get_key(
+                "parameters._param", self.MAAS_YAML):
+            reclass_actions.add_key(
+                "parameters._param.backupninja_backup_host",
+                backup_server_ip, self.MAAS_YAML)
+
     @pytest.fixture
     def precreate_sm_test_files(self, underlay_actions, salt_actions):
         """Create test files before executing Salt Master backup
diff --git a/tcp_tests/tests/system/test_backup_restore_cassandra.py b/tcp_tests/tests/system/test_backup_restore_cassandra.py
index 5dd695c..a6170dd 100644
--- a/tcp_tests/tests/system/test_backup_restore_cassandra.py
+++ b/tcp_tests/tests/system/test_backup_restore_cassandra.py
@@ -41,10 +41,12 @@
     def is_network_restored(self, underlay_actions, network_name, cfg_node):
         get_net_by_name = underlay_actions.check_call(
             "source /root/keystonercv3 && " +
-            "openstack network list --name {}".format(network_name),
+            "openstack network show {}".format(network_name),
             node_name=cfg_node,
             raise_on_err=False)["stdout"]
-        return get_net_by_name != ['\n']
+        return get_net_by_name != "Error while executing command:" + \
+                                  " No Network found for " +\
+                                  "{}".format(network_name)
 
     @pytest.fixture()
     def handle_restore_params(self, reclass_actions):
@@ -148,13 +150,14 @@
         salt = salt_actions
         reclass = reclass_actions
         dt = drivetrain_actions
-        cfg_node = self.get_cfg_fqn(salt)
+        # cfg_node = self.get_cfg_fqn(salt)
+        ctl_node = underlay_actions.get_target_node_names(target='ctl')[0]
         fixture_network_name = "test1"
         jenkins_start_timeout = 60
         jenkins_build_timeout = 1800
 
         show_step(1)
-        self.create_network(underlay_actions, fixture_network_name, cfg_node)
+        self.create_network(underlay_actions, fixture_network_name, ctl_node)
         show_step(2)
         create_instant_backup(salt)
         show_step(3)
@@ -173,7 +176,7 @@
         network_presented = self.is_network_restored(
             underlay_actions,
             fixture_network_name,
-            cfg_node)
+            ctl_node)
         assert network_presented, \
             'Network {} is not restored'.format(fixture_network_name)
 
@@ -203,10 +206,11 @@
         """
         salt = salt_actions
         fixture_network_name = "backuptest2"
-        cfg_node = self.get_cfg_fqn(salt)
+        # cfg_node = self.get_cfg_fqn(salt)
+        ctl_node = underlay_actions.get_target_node_names(target='ctl')[0]
 
         show_step(1)
-        self.create_network(underlay_actions, fixture_network_name, cfg_node)
+        self.create_network(underlay_actions, fixture_network_name, ctl_node)
         show_step(2)
         create_instant_backup(salt)
         show_step(3)
@@ -244,7 +248,7 @@
         network_presented = self.is_network_restored(
             underlay_actions,
             fixture_network_name,
-            cfg_node)
+            ctl_node)
         assert network_presented, \
             'Network {} is not restored'.format(fixture_network_name)
         statuses_ok = True
diff --git a/tcp_tests/tests/system/test_ceph_luminous_upgrade.py b/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
index bf7346c..11b4da5 100644
--- a/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
+++ b/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
@@ -7,6 +7,24 @@
 LOG = logger.logger
 
 
+@pytest.fixture
+def wa_cve_2021_20288_global_id_reclaim(reclass_actions,
+                                        salt_actions):
+    tgt = "I@ceph:common"
+    context_file = "cluster/*/ceph/common.yml"
+    if not reclass_actions.check_existence(
+            'parameters.ceph.common.config.mon.'
+            + 'auth_allow_insecure_global_id_reclaim'):
+        reclass_actions.add_bool_key(
+            'parameters.ceph.common.config.mon.'
+            + 'auth_allow_insecure_global_id_reclaim', "False", context_file)
+        salt_actions.run_state(tgt, "state.apply", "ceph.common")
+        salt_actions.cmd_run(tgt, "systemctl restart ceph-mon.target")
+    else:
+        LOG.info("Skipping WA ceph set auth_allow_insecure_global_id_reclaim")
+
+
+@pytest.mark.usefixtures("wa_cve_2021_20288_global_id_reclaim")
 class TestCephLuminousUpgrade(object):
 
     @pytest.mark.grab_versions
@@ -17,7 +35,7 @@
         """ Upgrade Ceph luminous to nautilus
 
         Scenario:
-            1. Chenge parameters in reclass
+            1. Change parameters in reclass
             2. Run Pipeline Ceph - upgrade
         https://docs.mirantis.com/mcp/master/mcp-operations-guide/ update-upgrade/major-upgrade/ceph-upgrade/upgrade-ceph.html
         """  # noqa: E501
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index b2f98b0..8845272 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -1,3 +1,5 @@
+import time
+
 import pytest
 
 from tcp_tests import logger
@@ -27,10 +29,11 @@
             node_name=cfg_node,
             raise_on_err=False)
     # Need to restart salt-minion service after accepting it in Salt Master
-    # underlay_actions.check_call(
-    #     "systemctl restart salt-minion",
-    #     node_name=xtra_node,
-    #     raise_on_err=False)
+    underlay_actions.check_call(
+        "systemctl restart salt-minion",
+        node_name=xtra_node,
+        raise_on_err=False)
+    time.sleep(15)
     # salt_actions.enforce_state("xtra*", "linux")
     # salt_actions.enforce_state("xtra*", "openssh")
 
@@ -429,20 +432,10 @@
         assert job_result == 'SUCCESS', job_description
 
 
-@pytest.mark.usefixtures("add_xtra_node_to_salt",
-                         "wa_prod36167")
-class TestCephMgr(object):
-    def test_add_node(self):
-        pass
-
-    def test_delete_node(self):
-        pass
-
-
 def build_node_config(node=''):
     """
 
-    :param node:  [osd, mon, rgw, mgr]
+    :param node:  [osd, mon, rgw]
     :return: string in yaml format
     """
 
@@ -498,8 +491,7 @@
         'NODE': node,
         'OSDSETTINGS': '',
         'MONSETTINGS': '',
-        'RGWSETTINGS': '',
-
+        'RGWSETTINGS': ''
     }
     # # ------------------OSD specific settings ----------
     if node == 'osd':
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
index a89d711..02d7d28 100644
--- a/tcp_tests/tests/system/test_failover_ceph.py
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -13,6 +13,7 @@
 #    under the License.
 import pytest
 
+import time
 from devops.helpers import helpers
 from tcp_tests import logger
 
@@ -33,13 +34,14 @@
         'EXTRA_PARAMS': {
             'envs': [
                 "tests_set=-k "
-                "'not test_ceph_health and not test_prometheus_alert_count'"
+                "'not salt_master and not test_ceph_health and not "
+                "test_prometheus_alert_count'"
             ]
         }
     }
 
     JENKINS_START_TIMEOUT = 60
-    JENKINS_BUILD_TIMEOUT = 60 * 15
+    JENKINS_BUILD_TIMEOUT = 60 * 25
 
     def get_ceph_health(self, ssh, node_names):
         """Get Ceph health status on specified nodes
@@ -51,12 +53,36 @@
         """
         return {
             node_name: ssh.check_call(
-                "ceph -s",
+                "ceph health",
                 node_name=node_name,
                 raise_on_err=False)['stdout_str']
             for node_name in node_names
         }
 
+    def wait_healthy_ceph(self,
+                          ssh,
+                          node_names=None,
+                          time_sec=30):
+        ceph_health = ""
+        status = False
+
+        start_time = time.time()
+        while time.time() - start_time < time_sec and not status:
+            ceph_health = self.get_ceph_health(ssh, node_names)
+            status = all(["HEALTH_OK"
+                          in status
+                          for node, status
+                          in ceph_health.items()])
+            if status:
+                break
+            LOG.info("Retry getting ceph health because Ceph is unhealthy: {}"
+                     .format(ceph_health))
+            time.sleep(10)
+
+        error = "" if status \
+            else "Ceph health is not OK: {0}".format(ceph_health)
+        return status, error
+
     @pytest.mark.grab_versions
     @pytest.mark.restart_osd_node
     def test_restart_osd_node(
@@ -69,11 +95,9 @@
 
         Scenario:
         1. Find Ceph OSD nodes
-        2. Check Ceph cluster health before node restart (skipped until
-            PROD-31374 is fixed)
+        2. Check Ceph cluster health before node restart
         3. Restart 1 Ceph OSD node
-        4. Check Ceph cluster health after node restart (skipped until
-            PROD-31374 is fixed)
+        4. Check Ceph cluster health after node restart
         5. Run Tempest smoke test suite
         6. Run test_ceph_status.py::test_ceph_osd and
             test_services.py::test_check_services[osd] sanity tests
@@ -93,11 +117,9 @@
 
         # Check Ceph cluster health before node restart
         show_step(2)
-        ceph_health = self.get_ceph_health(ssh, osd_hosts)
-        # FIXME: uncomment the check once PROD-31374 is fixed
-        # status = all(
-        #     ["OK" in status for node, status in ceph_health.items()])
-        # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+        result, error = self.wait_healthy_ceph(ssh=ssh,
+                                               node_names=osd_hosts)
+        assert result, error
 
         # Restart a Ceph OSD node
         show_step(3)
@@ -118,11 +140,10 @@
 
         # Check Ceph cluster health after node restart
         show_step(4)
-        ceph_health = self.get_ceph_health(ssh, osd_hosts)  # noqa
-        # FIXME: uncomment the check once PROD-31374 is fixed
-        # status = all(
-        #     ["OK" in status for node, status in ceph_health.items()])
-        # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+        result, error = self.wait_healthy_ceph(ssh=ssh,
+                                               node_names=osd_hosts,
+                                               time_sec=120)
+        assert result, error
 
         # Run Tempest smoke test suite
         show_step(5)
@@ -165,11 +186,9 @@
 
         Scenario:
         1. Find Ceph CMN nodes
-        2. Check Ceph cluster health before node restart (skipped until
-            PROD-31374 is fixed)
+        2. Check Ceph cluster health before node restart
         3. Restart 1 Ceph CMN node
-        4. Check Ceph cluster health after node restart (skipped until
-            PROD-31374 is fixed)
+        4. Check Ceph cluster health after node restart
         5. Run Tempest smoke test suite
         6. Run test_ceph_status.py::test_ceph_replicas and
             test_services.py::test_check_services[cmn] sanity tests
@@ -189,11 +208,9 @@
 
         # Check Ceph cluster health before node restart
         show_step(2)
-        ceph_health = self.get_ceph_health(ssh, cmn_hosts)
-        # FIXME: uncomment the check once PROD-31374 is fixed
-        # status = all(
-        #     ["OK" in status for node, status in ceph_health.items()])
-        # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+        result, error = self.wait_healthy_ceph(ssh=ssh,
+                                               node_names=cmn_hosts)
+        assert result, error
 
         # Restart a Ceph CMN node
         show_step(3)
@@ -214,11 +231,10 @@
 
         # Check Ceph cluster health after node restart
         show_step(4)
-        ceph_health = self.get_ceph_health(ssh, cmn_hosts) # noqa
-        # FIXME: uncomment the check once PROD-31374 is fixed
-        # status = all(
-        #     ["OK" in status for node, status in ceph_health.items()])
-        # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+        result, error = self.wait_healthy_ceph(ssh=ssh,
+                                               node_names=cmn_hosts,
+                                               time_sec=120)
+        assert result, error
 
         # Run Tempest smoke test suite
         show_step(5)
@@ -261,11 +277,9 @@
 
         Scenario:
         1. Find Ceph RGW nodes
-        2. Check Ceph cluster health before node restart (skipped until
-            PROD-31374 is fixed)
+        2. Check Ceph cluster health before node restart
         3. Restart 1 Ceph RGW node
-        4. Check Ceph cluster health after node restart (skipped until
-            PROD-31374 is fixed)
+        4. Check Ceph cluster health after node restart
         5. Run Tempest smoke test suite
         6. Run test_services.py::test_check_services[rgw] sanity test
 
@@ -284,11 +298,9 @@
 
         # Check Ceph cluster health before node restart
         show_step(2)
-        ceph_health = self.get_ceph_health(ssh, rgw_hosts)
-        # FIXME: uncomment the check once PROD-31374 is fixed
-        # status = all(
-        #     ["OK" in status for node, status in ceph_health.items()])
-        # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+        result, error = self.wait_healthy_ceph(ssh=ssh,
+                                               node_names=rgw_hosts)
+        assert result, error
 
         # Restart a Ceph RGW node
         show_step(3)
@@ -309,11 +321,11 @@
 
         # Check Ceph cluster health after node restart
         show_step(4)
-        ceph_health = self.get_ceph_health(ssh, rgw_hosts) # noqa
-        # FIXME: uncomment the check once PROD-31374 is fixed
-        # status = all(
-        #     ["OK" in status for node, status in ceph_health.items()])
-        # assert status, "Ceph health is not OK: {0}".format(ceph_health)
+        result, error = self.wait_healthy_ceph(ssh=ssh,
+                                               node_names=rgw_hosts,
+                                               time_sec=120)
+
+        assert result, error
 
         # Run Tempest smoke test suite
         show_step(5)
@@ -384,9 +396,9 @@
         # STEP #2
         show_step(2)
         # Get the ceph health output before restart
-        health_before = self.get_ceph_health(underlay, osd_node_names)
-        assert all(["OK" in p for n, p in health_before.items()]), (
-            "'Ceph health is not ok from node: {0}".format(health_before))
+        result, error = self.wait_healthy_ceph(ssh=underlay,
+                                               node_names=osd_node_names)
+        assert result, error
 
         # STEP #3
         show_step(3)
@@ -399,9 +411,10 @@
         # STEP #4
         show_step(4)
         # Get the ceph health output after restart
-        health_after = self.get_ceph_health(underlay, osd_node_names)
-        assert all(["OK" in p for n, p in health_before.items()]), (
-            "'Ceph health is not ok from node: {0}".format(health_after))
+        result, error = self.wait_healthy_ceph(ssh=underlay,
+                                               node_names=osd_node_names)
+
+        assert result, error
 
         rally.run_container()
 
@@ -451,9 +464,10 @@
         # STEP #2
         show_step(2)
         # Get the ceph health output before restart
-        health_before = self.get_ceph_health(underlay, cmn_node_names)
-        assert all(["OK" in p for n, p in health_before.items()]), (
-            "'Ceph health is not ok from node: {0}".format(health_before))
+        result, error = self.wait_healthy_ceph(ssh=underlay,
+                                               node_names=cmn_node_names)
+
+        assert result, error
 
         # STEP #3
         show_step(3)
@@ -466,9 +480,11 @@
         # STEP #4
         show_step(4)
         # Get the ceph health output after restart
-        health_after = self.get_ceph_health(underlay, cmn_node_names)
-        assert all(["OK" in p for n, p in health_before.items()]), (
-            "'Ceph health is not ok from node: {0}".format(health_after))
+        result, error = self.wait_healthy_ceph(ssh=underlay,
+                                               node_names=cmn_node_names,
+                                               time_sec=120)
+
+        assert result, error
 
         rally.run_container()
 
@@ -521,9 +537,9 @@
         # STEP #2
         show_step(2)
         # Get the ceph health output before restart
-        health_before = self.get_ceph_health(underlay, rgw_node_names)
-        assert all(["OK" in p for n, p in health_before.items()]), (
-            "'Ceph health is not ok from node: {0}".format(health_before))
+        result, error = self.wait_healthy_ceph(ssh=underlay,
+                                               node_names=rgw_node_names)
+        assert result, error
 
         # STEP #3
         show_step(3)
@@ -536,9 +552,10 @@
         # STEP #4
         show_step(4)
         # Get the ceph health output after restart
-        health_after = self.get_ceph_health(underlay, rgw_node_names)
-        assert all(["OK" in p for n, p in health_before.items()]), (
-            "'Ceph health is not ok from node: {0}".format(health_after))
+        result, error = self.wait_healthy_ceph(ssh=underlay,
+                                               node_names=rgw_node_names,
+                                               time_sec=120)
+        assert result, error
 
         rally.run_container()
 
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 45e3cdd..74f2f95 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -67,14 +67,14 @@
 
 
 @pytest.fixture(scope='class')
-def dont_switch_to_proposed(request):
-    return request.config.getoption("--dont-switch-to-proposed")
+def update_to_tag(request):
+    return request.config.getoption("--update-to-tag")
 
 
 @pytest.fixture(scope='class')
 def switch_to_proposed_pipelines(reclass_actions, salt_actions,
-                                 dont_switch_to_proposed):
-    if dont_switch_to_proposed:
+                                 update_to_tag):
+    if update_to_tag:
         return True
 
     reclass = reclass_actions
@@ -125,6 +125,23 @@
 
 
 @pytest.fixture
+def wa_cve_2021_20288_global_id_reclaim(reclass_actions,
+                                        salt_actions):
+    tgt = "I@ceph:common"
+    context_file = "cluster/*/ceph/common.yml"
+    if not reclass_actions.check_existence(
+            'parameters.ceph.common.config.mon.'
+            + 'auth_allow_insecure_global_id_reclaim'):
+        reclass_actions.add_bool_key(
+            'parameters.ceph.common.config.mon.'
+            + 'auth_allow_insecure_global_id_reclaim', "False", context_file)
+        salt_actions.run_state(tgt, "state.apply", "ceph.common")
+        salt_actions.cmd_run(tgt, "systemctl restart ceph-mon.target")
+    else:
+        LOG.info("Skipping WA ceph set auth_allow_insecure_global_id_reclaim")
+
+
+@pytest.fixture
 def wa_for_alerta_password_prod35958(reclass_actions,
                                      salt_actions):
 
@@ -168,8 +185,10 @@
 
 
 @pytest.mark.usefixtures("switch_to_proposed_pipelines",
+                         "update_to_tag",
                          "wa_for_galera_clustercheck_password_prod35705",
-                         "wa_for_alerta_password_prod35958")
+                         "wa_for_alerta_password_prod35958",
+                         "wa_cve_2021_20288_global_id_reclaim")
 class TestUpdateMcpCluster(object):
     """
     Following the steps in
@@ -180,7 +199,7 @@
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_mcp_update
     def test_update_drivetrain(self, salt_actions, drivetrain_actions,
-                               show_step, _):
+                               show_step, update_to_tag, _):
         """Updating DriveTrain component to release/proposed/2019.2.0 version
 
         Scenario:
@@ -206,7 +225,8 @@
         show_step(2)
         job_name = 'git-mirror-downstream-mk-pipelines'
         job_parameters = {
-            'BRANCHES': 'release/proposed/2019.2.0'
+            'BRANCHES': '*' or
+                        'release/proposed/2019.2.0'
         }
         job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
@@ -219,7 +239,8 @@
         show_step(3)
         job_name = 'git-mirror-downstream-pipeline-library'
         job_parameters = {
-            'BRANCHES': 'release/proposed/2019.2.0'
+            'BRANCHES': '*' or
+                        'release/proposed/2019.2.0'
         }
         job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
@@ -233,9 +254,10 @@
 
         job_name = 'upgrade-mcp-release'
         job_parameters = {
-            'GIT_REFSPEC': 'release/proposed/2019.2.0',
-            'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
-            'TARGET_MCP_VERSION': '2019.2.0',
+            'GIT_REFSPEC': update_to_tag or 'release/proposed/2019.2.0',
+            'MK_PIPELINES_REFSPEC':
+                update_to_tag or 'release/proposed/2019.2.0',
+            'TARGET_MCP_VERSION': update_to_tag or '2019.2.0',
             "DRIVE_TRAIN_PARAMS": {
                         "OS_DIST_UPGRADE": True,
                         "OS_UPGRADE": True,
@@ -499,7 +521,7 @@
         job_result, job_description = dt.start_job_on_jenkins(
             job_name='deploy-upgrade-rabbitmq',
             job_parameters=job_parameters,
-            build_timeout=40 * 60
+            build_timeout=50 * 60
         )
         assert job_result == 'SUCCESS', job_description
 
@@ -609,7 +631,7 @@
         job_parameters = {
             "TARGET_SERVERS": target,
             "OS_DIST_UPGRADE": True,
-            "UPGRADE_SALTSTACK": False,
+            "UPGRADE_SALTSTACK": True,
             "OS_UPGRADE": True,
             "INTERACTIVE": False}
         job_result, job_description = drivetrain_actions.start_job_on_jenkins(