Merge " Disable OVS DVR"
diff --git a/checklist.yaml b/checklist.yaml
index 107c75f..078bc8f 100644
--- a/checklist.yaml
+++ b/checklist.yaml
@@ -359,6 +359,13 @@
     status: ProdFailed
     defects: PROD-35761
 
+  - title: test_ceph_health
+    errors:
+      - client is using insecure global_id reclaim
+      - mons are allowing insecure global_id reclaim
+    status: ProdFailed
+    defects: PROD-36461
+
   - title: test_prometheus_alert_count[CephMonitorDownMinor]
     errors:
       - "1 of Ceph Monitors are down"
diff --git a/jobs/backups/backup-swarm-cookied-model-generator.sh b/jobs/backups/backup-swarm-cookied-model-generator.sh
new file mode 100644
index 0000000..a69b1cf
--- /dev/null
+++ b/jobs/backups/backup-swarm-cookied-model-generator.sh
@@ -0,0 +1,101 @@
+# export REMOTE_JENKINS='172.19.112.216'
+export REMOTE_JENKINS='sre-ci.scc.mirantis.net'
+
+if [ -z "$LAB_CONTEXT_NAME" ]; then
+  echo 'LAB_CONTEXT_NAME is not set!
+    This name will be used:
+    - to find the template file tcp_tests/templates/cookied-model-generator/salt_${LAB_CONTEXT_NAME}.yaml
+    - to set the salt model cluster name
+    - to make DOMAIN_NAME=${LAB_CONTEXT_NAME}.local
+  ';
+  exit 1
+fi
+
+
+if [ -n "$TCP_QA_REVIEW" ]; then
+	git fetch https://gerrit.mcp.mirantis.com/mcp/tcp-qa $TCP_QA_REVIEW && git checkout FETCH_HEAD
+fi
+
+. ${VENV_PATH}/bin/activate
+
+if [ "$ERASE_EXISTING_ENVIRONMENT" = "true" ]; then
+    dos.py erase $ENV_NAME || true
+fi
+
+cd tcp_tests
+
+rm -f ./*.tar.gz
+
+#MODEL_FILENAME=model_${CLUSTER_NAME}_${BUILD_NUMBER}.tar.gz
+MODEL_FILENAME=model_${ENV_NAME}.tar.gz
+if [[ -f $MODEL_FILENAME ]]; then
+   rm $MODEL_FILENAME
+fi
+
+####################
+# Generate the model
+
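+# "-k ${TEST_GROUP}" selects the model-generator test by pytest keyword
+# expression; results are written to deploy_generate_model.xml for reporting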
+py.test --junit-xml=deploy_generate_model.xml -k ${TEST_GROUP}
+
+
+########################################################
+# Upload the generated model to the repository on cz8133
+
+# Create a directory where the created model will be unpacked
+rm -rf ./model
+mkdir -p ./model
+
+eval $(ssh-agent)
+ssh-add ${SSH_PRIVATE_KEY}
+
+# Create remote repository
+ssh jenkins@${REMOTE_JENKINS} "
+  if [[ ! -d /home/jenkins/salt-models/${LAB_CONTEXT_NAME}.git ]]; then
+    mkdir -p /home/jenkins/salt-models/${LAB_CONTEXT_NAME}.git;
+    cd /home/jenkins/salt-models/${LAB_CONTEXT_NAME}.git;
+    git init --bare;
+    cp hooks/post-update.sample hooks/post-update;
+  fi"
+
+cd ./model
+
+# Prepare the repository in the ./model
+git init
+git remote add integration ssh://jenkins@${REMOTE_JENKINS}/home/jenkins/salt-models/${LAB_CONTEXT_NAME}.git
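+# Pull the suite branch if it already exists on the remote; '|| true' covers
+# the first run, when the branch does not exist yet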
+git pull integration ${REPOSITORY_SUITE} || true
+git checkout -b ${REPOSITORY_SUITE}
+git rm -rf . || true   # remove previous content from the working dir
+
+# unpack new model into ./model dir
+tar -xzf ../${MODEL_FILENAME} -C ./
+# Add reclass-system submodule
+git submodule add https://gerrit.mcp.mirantis.com/salt-models/reclass-system classes/system/
+cd classes/system
+git checkout ${SALT_MODELS_SYSTEM_COMMIT}
+cd ../..
+
+# The directory "classes/service" is created at bootstrap time
+# and cannot be committed to the repository automatically.
+# It is not needed in the repo because it contains only symlinks.
+cat << EOF > ./.gitignore
+classes/service
+EOF
+
+# create a commit with new model (if any change exists)
+git add -A
+git commit -m "${LAB_CONTEXT_NAME}@${REPOSITORY_SUITE} generated at $(date +%Y-%m-%d/%H-%M)
+
+REPOSITORY_SUITE=${REPOSITORY_SUITE}
+SALT_MODELS_SYSTEM_COMMIT=${SALT_MODELS_SYSTEM_COMMIT}
+COOKIECUTTER_TEMPLATE_COMMIT=${COOKIECUTTER_TEMPLATE_COMMIT}
+"
+
+# Push new commit to the ${REMOTE_JENKINS} repository
+git push integration ${REPOSITORY_SUITE}
+
+echo "######################################################################################################################"
+echo "#"
+echo "# Clone the model into the tests:  git clone http://${REMOTE_JENKINS}:8098/${LAB_CONTEXT_NAME}.git -b ${REPOSITORY_SUITE}"
+echo "#"
+echo "######################################################################################################################"
+
diff --git a/jobs/global.yaml b/jobs/global.yaml
index ea6deba..62adb50 100644
--- a/jobs/global.yaml
+++ b/jobs/global.yaml
@@ -4,7 +4,7 @@
       Do not edit this job through the web ! <br>
       Please use jenkins-job-builder in git <br>
       git clone ssh://gerrit.mcp.mirantis.com:29418/mcp/tcp-qa
-    current-version: 2019.2.15
-    previous-version: 2019.2.14
+    current-version: 2019.2.16
+    previous-version: 2019.2.15
     disabled-proposed: false
     disabled-2019-2-0: true
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 463bc24..ce9f83f 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -148,19 +148,6 @@
         deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes, upgrade_saltstack)
         // run test stages
         test(shared, common, steps, env_manager)
-        //run upgrade env to proposed
-        if (env.RUN_UPGRADE_AFTER_JOB == "true") {
-            network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
-            upgrade_job = "mcp-update-${env.TEMPEST_IMAGE_VERSION}-${network_backend}-sl"
-              def deploy = build job: "${upgrade_job}",
-                  parameters: [
-                      string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.LAB_CONFIG_NAME}"),
-                      string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
-                      string(name: 'NODE', value: "openstack_slave_${env.LAB_CONFIG_NAME}")
-                  ],
-                  wait: false,
-                  propagate: false
-        }
     } catch (e) {
         common.printMsg("Job is failed: " + e.message, "purple")
         throw e
@@ -223,7 +210,20 @@
                 } // node
             }
         }
-
+        // Run the upgrade of the env to proposed
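+        // The update job is fire-and-forget (wait: false, propagate: false),
+        // so its failure does not mark this build as failed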
+        if (env.RUN_UPGRADE_AFTER_JOB == "true") {
+            network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
+            upgrade_job = "mcp-update-${env.TEMPEST_IMAGE_VERSION}-${network_backend}-sl"
+              def deploy = build job: "${upgrade_job}",
+                  parameters: [
+                      string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.LAB_CONFIG_NAME}"),
+                      string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
+                      string(name: 'TEMPEST_PATTERN', value: env.TEMPEST_PATTERN),
+                      string(name: 'NODE', value: "openstack_slave_${env.LAB_CONFIG_NAME}")
+                  ],
+                  wait: false,
+                  propagate: false
+        }
     } // try
   } // node
 
diff --git a/jobs/pipelines/run-test-scenarios.groovy b/jobs/pipelines/run-test-scenarios.groovy
index 31aff06..76d5cb2 100644
--- a/jobs/pipelines/run-test-scenarios.groovy
+++ b/jobs/pipelines/run-test-scenarios.groovy
@@ -44,10 +44,10 @@
                     if (env.RUN_TEST_OPTS) {
                         shared.swarm_run_pytest(steps, "${PARENT_NODE_NAME}", make_snapshot_stages)
                     } else {
-                        common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
+                        common.infoMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
                     }
                 } catch (e) {
-                    common.printMsg("Tests are failed: " + e.message, "purple")
+                    common.infoMsg("Tests are failed: " + e.message, "purple")
                     currentBuild.result = 'FAILURE'
                 }
             } // stage("Run tests")
@@ -60,14 +60,14 @@
 
             stage("report results to testrail from jenkins master") {
                 if ("${env.REPORT_TO_TESTRAIL}" != "false") {
-                    common.printMsg("Running on: " + env.PARENT_NODE_NAME, "blue")
+                    common.infoMsg("Running on: " + env.PARENT_NODE_NAME, "blue")
                     shared.verbose_sh("""\
                            [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv --python=python3.7 /home/jenkins/venv_testrail_reporter""", true, false, true)
                     shared.run_cmd("""\
                             . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U""")
                     shared.swarm_testrail_report(steps, env.PARENT_NODE_NAME)
                 } else {
-                    common.printMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
+                    common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
                 }
             } // stage("report results to testrail from jenkins master")
             stage("Store TestRail reports to job description") {
@@ -77,7 +77,7 @@
                         currentBuild.description += "${description}"
                     }
                 } else {
-                    common.printMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
+                    common.infoMsg("REPORT_TO_TESTRAIL is set to FALSE. Skipped this step ")
                 }
             } // stage("Store TestRail reports to job description")
         } // dir
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 1ebee5a..1771b5d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -29,7 +29,7 @@
 
 currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
 
-timeout(time: 10, unit: 'HOURS') {
+timeout(time: 20, unit: 'HOURS') {
     timestamps {
         node ("${PARENT_NODE_NAME}") {
             if (! fileExists("${PARENT_WORKSPACE}")) {
diff --git a/jobs/project.yaml b/jobs/project.yaml
index 7aae28c..c2e37a7 100644
--- a/jobs/project.yaml
+++ b/jobs/project.yaml
@@ -22,6 +22,7 @@
       - heat-bm-cicd-queens-contrail-sl
       - bm-e7-cicd-pike-ovs-maas
       - bm-e7-cicd-pike-odl-maas
+      - bm-b300-cicd-queens-ovs-maas
       - released-bm-pike-ovs
       # --- Released envs ------
       - deploy-released:
diff --git a/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml b/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
new file mode 100644
index 0000000..d07fc2d
--- /dev/null
+++ b/jobs/templates/bm-b300-cicd-queens-ovs-maas.yml
@@ -0,0 +1,222 @@
+- job-template:
+    project-type: pipeline
+    description: '{job-description}'
+    concurrent: true
+    disabled: false
+    name: bm-b300-cicd-queens-ovs-maas
+    parameters:
+    - string:
+        default: bm-b300-cicd-queens-ovs-maas
+        description: ''
+        name: LAB_CONFIG_NAME
+        trim: 'false'
+    - string:
+        default: core,kvm,cicd
+        description: Comma-separated list of stacks to deploy the drivetrain (salt
+          cluster and cicd nodes)
+        name: DRIVETRAIN_STACK_INSTALL
+        trim: 'false'
+    - string:
+        default: '24000'
+        description: ''
+        name: DRIVETRAIN_STACK_INSTALL_TIMEOUT
+        trim: 'false'
+    - string:
+        default: openstack,ovs,ceph,stacklight
+        description: Comma-separated list of stacks to deploy the target platform
+          (openstack and additional components)
+        name: PLATFORM_STACK_INSTALL
+        trim: 'false'
+    - string:
+        default: '24000'
+        description: ''
+        name: PLATFORM_STACK_INSTALL_TIMEOUT
+        trim: 'false'
+    - string:
+        default: 2019.2.0
+        description: ''
+        name: MCP_VERSION
+        trim: 'false'
+    - string:
+        default: sre-team-infra
+        description: ''
+        name: NODE_NAME
+        trim: 'false'
+    - string:
+        default: /home/jenkins/images/ubuntu-16-04-x64-mcp2019.2.0.qcow2
+        description: ''
+        name: MCP_IMAGE_PATH1604
+        trim: 'false'
+    - string:
+        default: /home/jenkins/images/cfg01-day01.qcow2
+        description: ''
+        name: IMAGE_PATH_CFG01_DAY01
+        trim: 'false'
+    - string:
+        default: cfg01.${{LAB_CONFIG_NAME}}-config-drive.iso
+        description: ISO name that will be generated and downloaded to /home/jenkins/images/
+        name: CFG01_CONFIG_IMAGE_NAME
+        trim: 'false'
+    - string:
+        default: bm-b300-cicd-queens-ovs-maas
+        description: ''
+        name: ENV_NAME
+        trim: 'false'
+    - string:
+        default: ''
+        description: |-
+          Example: refs/changes/89/411189/36
+          (for now - only one reference allowed)
+        name: TCP_QA_REFS
+        trim: 'false'
+    - string:
+        default: refs/heads/release/proposed/2019.2.0
+        description: reference to patchset in pipeline-library
+        name: PIPELINE_LIBRARY_REF
+        trim: 'false'
+    - string:
+        default: refs/heads/release/proposed/2019.2.0
+        description: reference to patchset in mk-pipelines
+        name: MK_PIPELINES_REF
+        trim: 'false'
+    - string:
+        default: release/proposed/2019.2.0
+        description: Can be 'master' or 'proposed'. If empty, the ${{MCP_VERSION}}
+          value is used
+        name: COOKIECUTTER_TEMPLATE_COMMIT
+        trim: 'false'
+    - string:
+        default: release/proposed/2019.2.0
+        description: Can be 'master' or 'proposed'. If empty, the ${{MCP_VERSION}}
+          value is used
+        name: SALT_MODELS_SYSTEM_COMMIT
+        trim: 'false'
+    - string:
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
+        description: |-
+          Pytest option -k or -m with an expression to select the necessary tests.
+          Additional pytest options are allowed.
+        name: RUN_TEST_OPTS
+        trim: 'false'
+    - bool:
+        default: true
+        description: ''
+        name: SHUTDOWN_ENV_ON_TEARDOWN
+    - string:
+        default: ''
+        description: ''
+        name: COOKIECUTTER_REF_CHANGE
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: ENVIRONMENT_TEMPLATE_REF_CHANGE
+        trim: 'false'
+    - string:
+        default: '[MCP1.1_QUEENS]Tempest'
+        description: ''
+        name: TEMPEST_TEST_SUITE_NAME
+        trim: 'false'
+    - string:
+        default: queens
+        description: ''
+        name: TEMPEST_IMAGE_VERSION
+        trim: 'false'
+    - string:
+        default: proposed
+        description: ''
+        name: UPDATE_REPO_CUSTOM_TAG
+        trim: 'false'
+    - bool:
+        default: true
+        description: If set, reports will be created in TestRail for this build
+        name: REPORT_TO_TESTRAIL
+    - choice:
+        choices:
+        - heat
+        - devops
+        description: ''
+        name: ENV_MANAGER
+    - string:
+        default: https://ic-eu.ssl.mirantis.net:5000/v3
+        description: 'OpenStack Keystone catalog endpoint, for example https://10.90.0.80:5000/v3'
+        name: OS_AUTH_URL
+        trim: 'false'
+    - string:
+        default: sre-team
+        description: OpenStack project (tenant) name
+        name: OS_PROJECT_NAME
+        trim: 'false'
+    - string:
+        default: default
+        description: OpenStack user domain name
+        name: OS_USER_DOMAIN_NAME
+        trim: 'false'
+    - string:
+        default: sre-qa-ci-eu
+        description: Jenkins credentials ID with username and password to create a
+          heat stack in OpenStack
+        name: OS_CREDENTIALS
+        trim: 'false'
+    - string:
+        default: eu-cloud-low-flavors.env
+        description: |-
+          Heat template environment filename with 'parameter_defaults' dict, placed in
+          tcp_tests/templates/_heat_environments/, for example: microcloud-8116.env
+        name: LAB_PARAM_DEFAULTS
+        trim: 'false'
+    - string:
+        default: release/proposed/2019.2.0
+        description: ''
+        name: JENKINS_PIPELINE_BRANCH
+        trim: 'false'
+    - string:
+        default: refs/heads/release/proposed/2019.2.0
+        description: ''
+        name: MCP_COMMON_SCRIPTS_REFS
+        trim: 'false'
+    - string:
+        default: proposed
+        description: ''
+        name: UPDATE_VERSION
+        trim: 'false'
+    - string:
+        name: IPMI_CREDS
+        default: 'lab_engineer'
+    - string:
+        default: ''
+        description: ''
+        name: TEMPEST_EXTRA_ARGS
+        trim: 'false'
+    - password:
+        name: CISCO_PASS
+        default: '1fac0DlhILBo'
+    - string:
+        default: ''
+        description: ''
+        name: SALT_MODELS_SYSTEM_REF_CHANGE
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: BATCH_SIZE
+        trim: 'false'
+    - bool:
+        default: false
+        description: Whether to perform dist-upgrade on virtual nodes during deployment
+        name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply SaltStack updates on all nodes in the cluster before deployment'
+        name: UPGRADE_SALTSTACK
+    pipeline-scm:
+      lightweight-checkout: false
+      scm:
+      - git:
+          branches:
+          - FETCH_HEAD
+          refspec: ${{TCP_QA_REFS}}
+          url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa
+      script-path: jobs/pipelines/deploy-cicd-and-run-tests.groovy
+    logrotate:
+      daysToKeep: 365
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index 67d3880..70a384a 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -110,7 +110,7 @@
       - ceph_rgw-queens-dvr-sl:
           run-test-opts: '--maxfail=1 -k TestCephRgw'
           deployment: heat-cicd-queens-dvr-sl
-          display-name: Add/Remove MGR node
+          display-name: Add/Remove RGW node
     jobs:
       - '{test_scenario}'
     logrotate:
@@ -143,34 +143,36 @@
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
-
+    test-pattern-with-contrail:
+      ^heat_tempest_plugin.tests*|^tempest.api.image*|^tempest_horizon*|^tempest.api.identity*|^tempest.api.network*|^tempest.api.compute*|^tempest.api.volume*|^tempest.scenario*|^tempest.api.object_storage*
     test_scenario:
       - mcp-update-pike-dvr-sl:
           deployment: released-heat-cicd-pike-dvr-sl
-          disabled: true
           run-test-opts: '{test-opt}'
+          tempest_pattern: 'tempest'
           display-name: MCP update (pike)
 
       - mcp-update-queens-dvr-sl:
           deployment: released-heat-cicd-queens-dvr-sl
-          disabled: true
+          tempest_pattern: 'tempest'
           run-test-opts: '{test-opt}'
           display-name: MCP update (queens)
 
       - mcp-update-pike-contrail-sl:
           deployment: released-heat-cicd-pike-contrail41-sl
-          disabled: true
+          tempest_pattern: '{test-pattern-with-contrail}'
           run-test-opts: '{test-opt-with-contrail}'
           display-name: MCP update (pike + OC)
 
       - mcp-update-queens-contrail-sl:
           deployment: released-heat-cicd-queens-contrail41-sl
-          disabled: true
+          tempest_pattern: '{test-pattern-with-contrail}'
           run-test-opts: '{test-opt-with-contrail}'
           display-name: MCP update (queens + OC)
 
       - os-update-pike-to-queens:
           deployment: heat-cicd-pike-dvr-sl
+          disabled: true
           run-test-opts: '-k TestUpdatePikeToQueens'
           display-name: Update Pike -> Queens
 
@@ -268,6 +270,12 @@
           Additional pytest options are allowed.
         name: RUN_TEST_OPTS
         trim: 'false'
+    - text:
+        default: '{tempest_pattern|}'
+        description: |-
+          Examples: 'set=full', 'set=smoke', or a test name.
+        name: TEMPEST_PATTERN
+        trim: 'false'
     - bool:
         default: true
         name: REPORT_TO_TESTRAIL
diff --git a/jobs/view.yaml b/jobs/view.yaml
index c922ad9..69718b6 100644
--- a/jobs/view.yaml
+++ b/jobs/view.yaml
@@ -89,13 +89,16 @@
     filter-executors: true
     filter-queue: true
     job-name:
+      - deploy_bm
       - bm-cicd-pike-ovs-maas
       - bm-cicd-queens-ovs-maas
-      - deploy_bm
       - heat-bm-cicd-pike-contrail-sl
       - heat-bm-cicd-queens-contrail-sl
       - released-bm-pike-ovs
       - show_networks_used_by_libvirt
+      - bm-e7-cicd-pike-ovs-maas
+      - bm-e7-cicd-pike-odl-maas
+      - bm-b300-cicd-queens-ovs-maas
     columns:
       - status
       - weather
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index 7c75086..3952b5f 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -11,6 +11,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import yaml
 from tcp_tests import logger
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 
@@ -83,35 +84,21 @@
         :param file_name: name of YAML file to find a key
         :return: str, key if found
         """
-        LOG.info("Try to get '{key}' key from '{file}' file".format(
+        LOG.debug("Try to get '{key}' key from '{file}' file".format(
             file=file_name,
             key=key
             ))
         request_key = self.ssh.check_call(
-            "{reclass_tools} get-key {key} /srv/salt/reclass/*/{file_name}".
+            "{reclass_tools} get-key {key} "
+            "/srv/salt/reclass/classes/{file_name}".
             format(reclass_tools=self.reclass_tools_cmd,
                    key=key,
                    file_name=file_name))['stdout']
 
-        # Reclass-tools returns result to stdout, so we get it as
-        #     ['\n',
-        #      '---\n',
-        #      '# Found parameters._param.jenkins_pipelines_branch in \
-        #          /srv/salt/reclass/classes/cluster/../infra/init.yml\n',
-        #      'release/proposed/2019.2.0\n',
-        #      '...\n',
-        #      '\n']
-        # So we have no chance to get value without dirty code like `stdout[3]`
-
-        LOG.info("Raw output from reclass.get_key {}".format(request_key))
-        if len(request_key) < 4:
-            print("Can't find {key} in {file_name}. Got stdout {stdout}".
-                  format(key=key,
-                         file_name=file_name,
-                         stdout=request_key))
-            return None
-        value = request_key[3].strip('\n')
-        LOG.info("From reclass.get_key {}".format(value))
+        LOG.debug("Raw output from reclass.get_key {}".format(request_key))
+        encoded_request_key = ''.join(request_key).encode(encoding='UTF-8')
+        value = yaml.safe_load(encoded_request_key)
+        LOG.info("From reclass.get_key {}: {}".format(key, value))
         return value
 
     def add_bool_key(self, key, value, short_path):
@@ -174,13 +161,18 @@
             May look like cluster/*/cicd/control/leader.yml
         :return: None
         """
-        self.ssh.check_call(
-            "{reclass_tools} del-key classes {value} \
-            /srv/salt/reclass/classes/{path}".format(
-                reclass_tools=self.reclass_tools_cmd,
+        current_content = self.get_key('classes', short_path)
+        if value not in current_content:
+            LOG.info("{value} not found in 'classes' of {path}".format(
                 value=value,
                 path=short_path
             ))
+            return
+
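+        # Read-modify-write: remove the class from a copy of the current
+        # 'classes' list and write the whole list back via add_key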
+        new_content = list(current_content)  # copy before mutating
+        new_content.remove(value)
+
+        self.add_key("classes", new_content, short_path)
 
     def delete_key(self, key, short_path):
         """
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 1f63ae9..fbbb9b8 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -16,6 +16,7 @@
 import pkg_resources
 
 from collections import defaultdict
+from polling import poll
 
 from datetime import datetime
 from pepper import libpepper
@@ -116,6 +117,10 @@
     def local_async(self, tgt, fun, args=None, kwargs=None, timeout=None):
         return self.api.local_async(tgt, fun, args, kwargs, timeout=timeout)
 
+    def runner(self, fun, args=None, **kwargs):
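+        # Execute a Salt runner function on the master (e.g. 'jobs.active')
+        # via salt-api and unwrap the 'return' payload from the response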
+        response = self.api.runner(fun, arg=args, **kwargs)
+        return response.get("return")
+
     def lookup_result(self, jid):
         return self.api.lookup_jid(jid)
 
@@ -194,6 +199,20 @@
         result = self.local(tgt=tgt, fun='grains.get', args=grains)
         return result['return']
 
+    def wait_jobs_completed(self, timeout=60, interval=5):
+        """
+        :param timeout: int, time in seconds to wait
+        :param interval: int, time in seconds between attempts
+        :return: None
+        """
+        # TODO(harhipova) PROD-36434: check that the last job completed
+        # with a successful result
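+        # polling.poll() re-evaluates the target every 'interval' seconds and
+        # raises polling.TimeoutException if it is still falsy after 'timeout';
+        # the runner returns [{}] once no jobs are active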
+        poll(
+            target=lambda: self.runner('jobs.active')[0] == {},
+            timeout=timeout,
+            step=interval
+        )
+
     def get_ssh_data(self):
         """Generate ssh config for Underlay
 
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 462fa77..194b275 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -23,6 +23,8 @@
 python-jenkins
 cmd2<0.9
 PyYAML!=5.1
+polling==0.3.2
+
 
 # For Queens: https://github.com/openstack/requirements/blob/stable/queens/global-requirements.txt
 python-heatclient>=1.10.0
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env b/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
index 0d0c859..ed3e516 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud-low-flavors.env
@@ -40,7 +40,7 @@
   vsrx_flavor: oc_vsrx
 
   key_pair: system_key_8133
-  bm_availability_zone: nova
+  bm_availability_zone: vlan-provider-net-az
   vm_availability_zone: nova
   net_public: public
 
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
index 7b77db1..d5e0e5a 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -44,7 +44,7 @@
   vsrx_flavor: oc_vsrx
 
   key_pair: system_key_8133
-  bm_availability_zone: nova
+  bm_availability_zone: vlan-provider-net-az
   vm_availability_zone: nova
   net_public: public
 
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/encryption-key.asc b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/encryption-key.asc
new file mode 100644
index 0000000..381eb77
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/encryption-key.asc
@@ -0,0 +1,56 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQcYBFyBRcUBEACmP/muUIwbEg6Z7dA3c9I2NadcjDHXKg/ViXwaOB4KSd9/FC0o
+KSBPccWb+1sm+zdUy2f/LC5r8RvU7yZd4Mbzz8R1DQncXg4nG7bppW7oAcHpc0jk
+pV/SvdMYxuXsrbKbpoGEquwVkbb4oTv2MLSBfGfFzBeQfiwGEWm1xPLSeXc4biLC
+FatCU7w4LS1U4BEOqRCp6lW/hQFLoX+j6rNT8TwC5AeFpKgUWuQZGOO4fZKpbvo2
+sCvF5VA1HSVXlZtzum6pL1yzLL/SoyLrYOy1KrZQmSBHG9htCZQVmvYK7U5WtWE4
+Ws5IAj+HwvgKyzXE2Srsirj1NqauQRsk+1riQk3rpDrX2BeXNUSoHR5M/RDY0gCc
+8P6heanQRnyFtjUSoovkQsydY77+QVxe0MCs+lZlg31fL+wJVG7FIbIKKwR5sj8i
+/JqhWE+t2ZzIrQ/7o7fRk7hv/u69Vb/t/Nt7fkbn53zoubqi3kNgXf6hwhTUtfW/
+lE9cc4JTzis4i/RnILUDnAwos1c0Z+tGCUo4pbiP71VfU8L259g+clPFXOIkqA9t
+L9JSZQfhH/lRj3Abs57OvZjN7/D1h8PWB+8nTB8bkoUt45SubgQb0Y9maWUcwjxw
+AcJSIk6mq8vVdBu7zOuslDjMnoUZbtJwcSwQQOnb9UUppjs3CjbcH80ttQARAQAB
+AA/9ExdprtDlJf6u2pJqxNNyInOK4p/e4VydMOJ28/PZz0iod8lzXhdK9JSWItF8
+qD9VHVG2gaErO44Wqh9EgqdbcYg8gUycA0hxy5/tI2uyDsaU5CAvEMLE/Eh8Q24j
+3UgdKK64VOnj7p4rKuYpIp55PB1zNU24rwkuOQwq3Yreb7kvLbXIHA2s+xLunGzj
+tcl9a/eSSFD2w+WcPnkvVT2QlmUvhQ12p6w++QdvBkrLa9ZPz1FFPp6AiFtLGK5e
+KW6uyV1xc9BSjujmpmPBkNIynKNpCFxtTn0uH2doMAr5kkuqIV726SfUZISNkyOa
+pHKtnAtsWHmdv9skzQIBAgAzcXTBGbdDxRj6QR+ohqbsCzfu3z9QHSbXUmxezti9
+bQqpsU1SIg8z2oDARFR6KlRzhnfpPvan+Gp9TvYsvxrXe61HpxRMdLj6Gt2Ibruf
+YHCtr1S9J5CzTTOurlIKpACUYIqgVXfgIkQzqiYX8e56PiDTUB++OqEg66i0orXB
+nbHAD2vu16CNvcaNqsak3DWkHMwmEfsuxqyUXNte0eYu9SCHtnNoYT/D7A72gK4b
+Gqg80J8ZCpo1ilIX3xUq8WsH+CoXs0X7hy6Cbi22AqnHFRYmrgoIWmRzJonp393b
+yqmTV+QsKQRpmwdX4hiH78zJLnLEUQMn8CuHAGwaJCzk4okIAMKNrIQZhkdbCCe4
+IrLuMKn4aQj3c22SMXNmu78/0cP9Rtsm3ChjzzelLO7NjvPm0nIvEcThFSIZIXCv
+iWGZCXFCKn3WtA5xWuMFNXsEQcc3AG/qRODdDSeFpo+VH/9IwppAc3zI2jxe1PRD
+G2DnheLaLIKgHunsCYxpftJDod/vRqRHeU7ulMVJfEKVxdzrCbKGiIOXSyS6KowQ
+JOxF/80ocq/25Zc/oH25Y2r/0y+xzDpOHBgU0ndrCZf2z8oOuECJTxcq83UDyJzT
+HrG/hTrU83YsQMZ0AwBrYxpzUfdH7b6y60VE19FrwmMDK6Fz8I/x4Ai0sNkI3QLR
+NntY9fsIANrB3QM8CtsdxXsFvdTEwNLsG8LMdn3loCH6Cq3ejkEKa69Uua+sB6ND
+wYOXWzyksLZJyfxIXux/hMlK/kO3ohGcEFiMUaDZndJy8IKUlDrhwcUZqm7dXMDU
+CIf0T3rOEzOXbNu3UTds3j/ruSvA5KmjzOa4Qnb41CyL5Fh7x0R8Rux3NzAn6Ecx
+Y+nAWRtI/Yz7zdL8zuHaJfbVuxAPJ+ImcXAS7cX6T9dM3tWRlam1+0Ezhdb4F8i5
+lcY7sMu95scDwhV7qOmln6wtGSkBPZgE0+TqRuELZrPvlcIRRIM42UwPWhYO2PG8
+kKd2i5teweDnhzN8+E87VV2BQhP9DA8H/0+ZiXsvaG60JGqNmWzVbB6U1qgwrFOR
+VcuzIWpdZyQR8Ok63GXuA0odoqReolba9R6fVlXchj6INBz2WY2F0twwCRPx7tRg
+Pyq4PaTA8ZYYjAVWVCd9k97gY2i80p4MPzQCnE8g4n6OWGY47pcTwSkm4HBoGoam
+igIRn3Soz7CXGF+PvSGi1T0jpwM5IWfM3IwEUPdPTIJuA2iD/9zSKDvhsP+trJ1Y
+TMe9CW3Llf5mFbHLRZ7LfMOLIngKOIxBAxHiT8wUrIRaH78wHdz8ALDsC+LNP6rK
+hKb8h/VHXaqmf0BlNjGpO7XZXfxXWJ0oTUG5Z+jKz2Ir14HYLZI1GlOA8bQlZXhh
+bXBsZS5jb20gPHNhbHQtbWFzdGVyQGV4YW1wbGUuY29tPokCTgQTAQgAOBYhBLaR
+Vrvqyq56MiGjUvXLKtw2FZsDBQJcgUXFAhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4B
+AheAAAoJEPXLKtw2FZsDpi4P/1kmvlpkbOhrL73zAPyMzYa4Yo2Pi/BoMbyEKNKO
+K3wLCdP6xLGecVIt8pANosksDSGlWAnWj36/jfgt/aZisx1u6MTYaOEHkXahxOX4
+ghDW1cTbdtz7Uy5Ah9O3WNI+ejmOpCtuc3P/XOkdttKZLuCNCs6ocgCsejpNHcFK
+vMhOhnRKV8kcBrG2QLyfSyafBtM/zV+NR4Wrng71Za8fiXHlDanmrAIyuSnD538r
+hTwSFe0C9HntwuF6W+UShN7c+jPJaKQjKbZy9fuFp33NcTSPCB5dH9yrhQvOeFQo
+dFzEabMDFVGPfUVWR+TH39dWYOsq5zFmgQAbOB/vHdmEtrYNrxX0AiCZZHQHTUb9
+oBK68V8eVeFdoRLcMORBZ2RCqkQTOQoAF7o772knltjtsymnI0XNvVC/XCnZv89Q
+/eoivrd/rMMpTFOGcys6EAnSUWx0ZG/JCkezQqnx9U219BvqKNOZ60aOeOYHKpsX
+Ha8Nr72YRmtm0UMsDjEUyLOj+o06XnN7uafMv2bZpjWh2hfOrkAbxe41z6t+78ho
+P+C5vSvp01OmAt71iq+62MXVcLVKEWDpiuZSj8m83RlY5AGIaPaGX9LKPcHdGxKw
+QSczgB/jI3G08vWaq82he6UJuYexbYe1iJXfvcx8kThwZ1nXQJm+7UsISUsh8/NZ
+x0n/
+=uxDD
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
new file mode 100644
index 0000000..0ddb228
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -0,0 +1,876 @@
+default_context:
+  ironic_enabled: True
+  openstack_baremetal_hostname: bmt
+  openstack_baremetal_address_baremetal: 10.14.0.10
+  openstack_baremetal_node01_baremetal_address: 10.14.0.11
+  openstack_baremetal_node02_baremetal_address: 10.14.0.12
+  openstack_baremetal_node03_baremetal_address: 10.14.0.13
+  openstack_baremetal_node01_address: 10.167.11.21
+  openstack_baremetal_node02_address: 10.167.11.22
+  openstack_baremetal_node03_address: 10.167.11.23
+  openstack_baremetal_neutron_subnet_cidr: 10.14.0.0/24
+  openstack_baremetal_neutron_subnet_allocation_start: 10.14.0.100
+  openstack_baremetal_neutron_subnet_allocation_end: 10.14.0.200
+  openstack_baremetal_address: 10.167.11.20
+  openstack_baremetal_interface: ens7
+  openstack_baremetal_vip_interface: br_baremetal
+  jenkins_cfg_admin_password: r00tme
+  bmk_enabled: 'False'
+  cicd_control_node01_address: 10.167.11.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.11.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.11.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.11.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cluster_domain: bm-b300-cicd-queens-ovs-maas.local
+  cluster_name: bm-b300-cicd-queens-ovs-maas
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+  control_network_netmask: 255.255.254.0
+  control_network_subnet: 10.167.11.0/23
+  control_vlan: '2404'
+
+  jenkins_pipelines_branch: 'release/2019.2.0'
+  deploy_network_gateway: 172.16.180.1
+  deploy_network_netmask: 255.255.254.0
+  deploy_network_subnet: 172.16.180.0/23
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.11.241
+  infra_kvm01_deploy_address: 172.16.180.3
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.11.242
+  infra_kvm02_deploy_address: 172.16.180.4
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.11.243
+  infra_kvm03_deploy_address: 172.16.180.5
+  infra_kvm03_hostname: kvm03
+  infra_kvm04_control_address: 10.167.11.244
+  infra_kvm04_deploy_address: 172.16.180.6
+  infra_kvm04_hostname: kvm04
+  infra_kvm05_control_address: 10.167.11.245
+  infra_kvm05_deploy_address: 172.16.180.7
+  infra_kvm05_hostname: kvm05
+  infra_kvm06_control_address: 10.167.11.246
+  infra_kvm06_deploy_address: 172.16.180.8
+  infra_kvm06_hostname: kvm06
+  infra_kvm_vip_address: 10.167.11.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_enabled: 'True'
+  maas_deploy_address: 172.16.180.2
+  maas_deploy_cidr: 172.16.180.0/23
+  maas_deploy_gateway: 172.16.180.1
+  maas_deploy_range_end: 172.16.181.250
+  maas_deploy_range_start: 172.16.180.18
+  maas_dhcp_enabled: 'True'
+  maas_fabric_name: fabric-0
+  maas_hostname: cfg01
+  maas_manage_deploy_network: 'True'
+  maas_machines: |
+        kvm01: # #cz7625-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          # pxe_interface_mac:
+          pxe_interface_mac: "0c:c4:7a:33:24:be"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:33:24:be"
+              mode: "static"
+              ip: "172.16.180.3"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:33:24:bf"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:01:3e"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:01:3f"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:f3:ce"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:f3:cf"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.227"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm02: # #cz7627-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:33:2d:6a"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:33:2d:6a"
+              mode: "static"
+              ip: "172.16.180.4"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:33:2d:6b"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:43:b8"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:43:b9"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:96:02"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:96:03"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.229"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm03: # #cz7756-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:69:a0:4c"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:69:a0:4c"
+              mode: "static"
+              ip: "172.16.180.5"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:69:a0:4d"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c0:c2:14"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c0:c2:15"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:09:c2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:09:c3"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.88"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm04: # #cz7792-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          # pxe_interface_mac:
+          pxe_interface_mac: "0c:c4:7a:6c:83:5c"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:83:5c"
+              mode: "static"
+              ip: "172.16.180.6"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:83:5d"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:7d:98"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:7d:99"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:de"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:df"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.112"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm05: # #cz7876-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:6c:88:d6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:88:d6"
+              mode: "static"
+              ip: "172.16.180.7"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:88:d7"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:74"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:75"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:89:be"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:89:bf"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.208"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm06: # #cz8073-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:df:ac"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:df:ac"
+              mode: "static"
+              ip: "172.16.180.8"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:df:ad"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:3a:f2"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:3a:f3"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:a6:4c"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:a6:4d"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.227.118"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw01: # #cz9039-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:84"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:84"
+              mode: "static"
+              ip: "172.16.180.9"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:85"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:41:b0"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:41:b1"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:90:d2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:90:d3"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.229.28"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw02: # #cz9048-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:82"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:82"
+              mode: "static"
+              ip: "172.16.180.10"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:83"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:00:7c"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:00:7d"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:bc:88:8a"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:bc:88:8b"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.23"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw03: # #cz8159-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:6c:bc:f6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:bc:f6"
+              mode: "static"
+              ip: "172.16.180.11"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:bc:f7"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:9b:cc:32"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:9b:cc:33"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c1:a5:04"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c1:a5:05"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.9"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd001: # #cz9040-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:c9:02"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:c9:02"
+              mode: "static"
+              ip: "172.16.180.12"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:c9:03"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:aa:90"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:aa:91"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:0a:a4"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:0a:a5"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.246"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd002: # #cz9041-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:60"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:60"
+              mode: "static"
+              ip: "172.16.180.13"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:61"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:04:2c"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:04:2d"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:01:f2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:01:f3"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.243"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd003: # #cz9042-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:c9:3a"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:c9:3a"
+              mode: "static"
+              ip: "172.16.180.14"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:c9:3b"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:33:d7:10"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:33:d7:11"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:0b:5f:50"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:0b:5f:51"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.244"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp001: # #cz9039-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d6:aa"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d6:aa"
+              mode: "static"
+              ip: "172.16.180.15"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d6:ab"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:86:76"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:86:77"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:39:3c"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:39:3d"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.248"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp002: # #cz9046-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:ce:30"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:ce:30"
+              mode: "static"
+              ip: "172.16.180.16"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:ce:31"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:e0:7d:e0"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:e0:7d:e1"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:0c:0e"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:0c:0f"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.222"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp003: # #cz8061-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:e0:ce"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:e0:ce"
+              mode: "static"
+              ip: "172.16.180.17"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:e0:cf"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:94:5e"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:94:5f"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:87:e4"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:87:e5"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.228"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+  mcp_version: proposed
+  mcp_docker_registry: docker-prod-local.docker.mirantis.net
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: bm-b300-cicd-queens-ovs-maas.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openstack_benchmark_node01_address: 10.167.11.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: golden
+  openstack_compute_count: '3'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
+  openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
+  openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
+  openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
+  openstack_control_address: 10.167.11.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.11.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.11.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.11.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.11.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.11.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.11.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.11.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_deploy_address: 172.16.180.9
+  openstack_gateway_node02_deploy_address: 172.16.180.10
+  openstack_gateway_node03_deploy_address: 172.16.180.11
+  openstack_gateway_node01_address: 10.167.11.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node01_tenant_address: 10.167.12.9
+  openstack_gateway_node02_address: 10.167.11.225
+  openstack_gateway_node02_tenant_address: 10.167.12.10
+  openstack_gateway_node03_address: 10.167.11.226
+  openstack_gateway_node03_tenant_address: 10.167.12.11
+  openstack_message_queue_address: 10.167.11.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.11.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.11.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.11.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'True'
+  openstack_neutron_vlan_aware_vms: 'True'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_hugepages_count: '16'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nfv_sriov_network: physnet2
+  openstack_nfv_sriov_numvfs: '7'
+  openstack_nfv_sriov_pf_nic: enp5s0f1
+  openstack_nova_cpu_pinning: 6,7,8,9,10,11
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.11.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.11.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.11.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.11.19
+  openstack_version: queens
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
+  salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
+  salt_master_address: 10.167.11.5
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.180.2
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.11.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.11.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.11.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.11.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.167.11.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.11.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.11.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.11.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.11.96
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.11.97
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.11.98
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.11.99
+  stacklight_telemetry_node03_hostname: mtr03
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.12.1
+  tenant_network_netmask: 255.255.254.0
+  tenant_network_subnet: 10.167.12.0/23
+  tenant_vlan: '2406'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  ceph_enabled: 'True'
+  ceph_version: "nautilus"
+  ceph_hyper_converged: "False"
+  rbd_monitoring_enabled: 'True'
+  rbd_pool_stats_gnocchi: 'True'
+  rbd_pool_stats_volumes: 'True'
+  rbd_pool_stats_images: 'True'
+  rbd_pool_stats_backups: 'True'
+  rbd_pool_stats_vms: 'True'
+  # Apply settings for ceph from contexts/ceph/nautilus-collocated-block-db.yml
+  ceph_osd_backend: "bluestore"
+  ceph_osds_per_device: '1'
+  ceph_osd_data_size: ''
+  ceph_osd_dmcrypt: False
+  ceph_osd_count: "3"
+  ceph_osd_node_count: 3
+  ceph_osd_block_db_size: 20
+  ceph_osd_journal_size: 20
+  ceph_osd_bond_mode: "active-backup"
+  ceph_osd_data_partition_prefix: ""
+  ceph_public_network_allocation: storage
+  ceph_cluster_network: "10.167.11.0/24"
+  ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
+  ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
+  ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
+  ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
+
+  ceph_public_network: 10.167.11.0/24
+
+  ceph_osd_data_disks: "/dev/sdb"
+  ceph_osd_journal_or_block_db_disks: ""
+  ceph_osd_block_partition_prefix: ''
+  ceph_osd_mode: "separated"
+  ceph_osd_deploy_nic: "eth0"
+  ceph_osd_primary_first_nic: "eth1"
+  ceph_osd_primary_second_nic: "eth2"
+  ceph_mon_node01_address: "10.167.11.66"
+  ceph_mon_node01_hostname: "cmn01"
+  ceph_mon_node02_address: "10.167.11.67"
+  ceph_mon_node02_hostname: "cmn02"
+  ceph_mon_node03_address: "10.167.11.68"
+  ceph_mon_node03_hostname: "cmn03"
+  ceph_rgw_address: "10.167.11.75"
+  ceph_rgw_node01_address: "10.167.11.76"
+  ceph_rgw_node01_hostname: "rgw01"
+  ceph_rgw_node02_address: "10.167.11.77"
+  ceph_rgw_node02_hostname: "rgw02"
+  ceph_rgw_node03_address: "10.167.11.78"
+  ceph_rgw_node03_hostname: "rgw03"
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: True
+  designate_backend: powerdns
+  designate_enabled: 'True'
+  openstack_dns_node01_address: 10.167.11.113
+  openstack_dns_node02_address: 10.167.11.114
+  nova_vnc_tls_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_rabbitmq_standalone_mode: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'True'
+  gnocchi_aggregation_storage: ceph
+  openstack_telemetry_address: 10.167.11.83
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 10.167.11.84
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 10.167.11.85
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 10.167.11.86
+  openstack_telemetry_node03_hostname: mdb03
+  barbican_backend: dogtag
+  barbican_enabled: 'True'
+  barbican_integration_enabled: 'False'
+  openstack_barbican_address: 10.167.11.44
+  openstack_barbican_hostname: kmn
+  openstack_barbican_node01_address: 10.167.11.45
+  openstack_barbican_node01_hostname: kmn01
+  openstack_barbican_node02_address: 10.167.11.46
+  openstack_barbican_node02_hostname: kmn02
+  openstack_barbican_node03_address: 10.167.11.47
+  openstack_barbican_node03_hostname: kmn03
+  openstack_create_public_network: 'True'
+  openstack_public_neutron_subnet_gateway: 172.17.42.1
+  openstack_public_neutron_subnet_cidr: 172.17.42.0/26
+  openstack_public_neutron_subnet_allocation_start: 172.17.42.20
+  openstack_public_neutron_subnet_allocation_end: 172.17.42.55
+  backend_vlan: '2402'
+  manila_enabled: 'False'
+  openscap_enabled: 'True'
+  octavia_health_manager_node01_address: 192.168.1.10
+  octavia_health_manager_node02_address: 192.168.1.11
+  octavia_health_manager_node03_address: 192.168.1.12
+  octavia_manager_cluster: 'True'
+  octavia_amphora_topology: 'ACTIVE_STANDBY'
+  octavia_spare_amphora_pool_size: 1
+  octavia_lb_mgmt_cidr: 192.168.1.0/24
+  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+  openstack_octavia_enabled: 'True'
+  secrets_encryption_enabled: 'True'
+  secrets_encryption_key_id: 'F5CB2ADC36159B03'
+  # Used on CI only.
+  secrets_encryption_private_key: ''
+  cinder_backup_engine: 'ceph'
+  cinder_ceph_backup_pool_name: 'backups'
+  openstack_keystone_security:
+    disable_user_account_days_inactive: 7
+    lockout_failure_attempts: 60
+    lockout_duration: 600
+    password_expires_days: 730
+    unique_last_password_count: 5
+    minimum_password_age: 0
+    password_regex: "'^[a-zA-Z0-9~!@#%^&\\*_=+]{32,}$$'"
+    password_regex_description: "Your password can contain capital letters, lowercase letters, digits, the symbols '~ ! @ # % ^ & * _ = +', and must be at least 32 characters long"
+    change_password_upon_first_use: False
+  stacklight_ssl_enabled: 'True'
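
Note: after cookiecutter rendering, the doubled '$$' in password_regex above collapses to a single '$' (and '\\*' to '\*'). A minimal sketch of checking a candidate password against the rendered policy, assuming GNU grep with PCRE support (the variable names are illustrative, not part of the template):

    regex='^[a-zA-Z0-9~!@#%^&\*_=+]{32,}$'
    candidate='Abcdefghijklmnopqrstuvwxyz012345'   # exactly 32 allowed characters
    if printf '%s\n' "$candidate" | grep -qP "$regex"; then
        echo "accepted by the keystone password policy"
    else
        echo "rejected"
    fi
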
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..eec4779
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-environment.yaml
@@ -0,0 +1,200 @@
+nodes:
+    cfg01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      - features_runtest
+      interfaces:
+        ens3:
+          role: single_static_mgm
+        ens4:
+          role: single_static_ctl
+
+    # Physical nodes
+    kvm01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm04.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node04
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm05.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node05
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    kvm06.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node06
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+        ten1:
+          role: bond_baremetal_lacp
+        sten1:
+          role: bond_baremetal_lacp
+
+    osd<<count>>:
+      reclass_storage_name: ceph_osd_rack01
+      roles:
+      - ceph_osd
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+#          role: bond0_ab_vlan_ceph_storage_backend
+
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_prv_lacp
+
+    gtw01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_baremetal_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_baremetal_lacp
+        sten2:
+          role: bond_prv_lacp
+
+    gtw02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_gateway_node02
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_baremetal_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_baremetal_lacp
+        sten2:
+          role: bond_prv_lacp
+
+    gtw03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_gateway_node03
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten1:
+          role: bond_ctl_baremetal_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_baremetal_lacp
+        sten2:
+          role: bond_prv_lacp
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
new file mode 100644
index 0000000..f9671d6
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
@@ -0,0 +1,448 @@
+nodes:
+    ctl01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      - features_ironic_baremetal_nodes
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    ctl02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    ctl03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    prx01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    prx02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mon01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mon02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mon03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mtr01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mtr02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mtr03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    log01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    log02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    log03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cmn01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_mon_node01
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cmn02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_mon_node02
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cmn03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_mon_node03
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    rgw01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_rgw_node01
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    rgw02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_rgw_node02
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    rgw03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: ceph_rgw_node03
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mdb01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_telemetry_node01
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mdb02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_telemetry_node02
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    mdb03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_telemetry_node03
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dns01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_dns_node01
+      roles:
+      - openstack_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dns02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_dns_node02
+      roles:
+      - openstack_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    kmn01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_barbican_node01
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    kmn02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_barbican_node02
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    kmn03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_barbican_node03
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    bmt01.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_baremetal_node01
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+        ens4:
+          role: single_baremetal
+
+    bmt02.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_baremetal_node02
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+        ens4:
+          role: single_baremetal
+
+    bmt03.bm-b300-cicd-queens-ovs-maas.local:
+      reclass_storage_name: openstack_baremetal_node03
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+        ens4:
+          role: single_baremetal
+
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt.yaml
new file mode 100644
index 0000000..877ee47
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/salt.yaml
@@ -0,0 +1,85 @@
+{% set HOSTNAME_CFG01='cfg01.bm-b300-cicd-queens-ovs-maas.local' %}
+{% set LAB_CONFIG_NAME='bm-b300-cicd-queens-ovs-maas' %}
+{% set DOMAIN_NAME='bm-b300-cicd-queens-ovs-maas.local' %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
+{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
+
+- description: Wait for salt-master to be ready after configdrive user-data
+  cmd: |
+    timeout 120 salt-call test.ping
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Generate a public key and register it in MAAS
+  cmd: |
+    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run commissioning of the BM nodes
+  cmd: |
+    salt-call maas.process_machines
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait for machines to be ready
+  cmd: |
+    salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 7, delay: 5}
+  skip_fail: false
+
+- description: Enforce the interface configuration defined in the model for the servers
+  cmd: |
+    salt-call state.sls maas.machines.assign_ip;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Assign drive partitions to machines
+  cmd: salt-call state.sls maas.machines.storage
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Remove all the salt-minions and re-register the cfg01 minion
+  cmd: |
+    salt-key -y -D;
+    salt-call test.ping
+    sleep 5
+    # Check that the cfg01 is registered
+    salt-key | grep cfg01
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Provision the automatically commissioned physical nodes through MAAS
+  cmd: |
+    salt-call maas.deploy_machines;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Wait for machines to be deployed
+  cmd: |
+    salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 6, delay: 5}
+  skip_fail: false
+
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
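
The steps above encode the whole MAAS bare-metal flow: commission, wait for Ready, enforce interfaces and storage, deploy, wait for Deployed. A condensed manual equivalent for re-running a stuck stage from cfg01 (a sketch; it relies only on the maas modules and states already used above):

    salt-call maas.process_machines                      # commission the BM nodes
    salt-call state.sls maas.machines.wait_for_ready     # block until machines are Ready
    salt-call state.sls maas.machines.assign_ip          # enforce model-defined interfaces
    salt-call state.sls maas.machines.storage            # apply the drive partitioning
    salt-call maas.deploy_machines                       # start OS provisioning
    salt-call state.sls maas.machines.wait_for_deployed  # block until machines are Deployed
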
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/tempest_skip.list b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/tempest_skip.list
new file mode 100644
index 0000000..c9c567a
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/tempest_skip.list
@@ -0,0 +1,29 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-33000 [OC][Infra] Instances don't have access to external net
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server\b
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_volume_shelved_or_offload_server\b
+# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume\b
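
The entries in this list are regular expressions; a trailing \b anchors a test id so it does not also match longer, look-alike names. A rough way to apply the list to a plain file of test ids (a sketch; all_tests.txt is an assumed input, not part of the suite):

    grep -v '^#' tempest_skip.list | sed '/^$/d' > skip_patterns.txt
    grep -v -f skip_patterns.txt all_tests.txt > tests_to_run.txt
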
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..2ebdf1f
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
@@ -0,0 +1,82 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+  - name: root
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+  - name: jenkins
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+    ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+  list: |
+    root:r00tme
+    jenkins:qalab
+  expire: False
+
+packages:
+  - openjdk-8-jre-headless
+  - libyaml-dev
+  - libffi-dev
+  - libvirt-dev
+  - python-dev
+  - python-pip
+  - python-virtualenv
+  #- python-psycopg2
+  - pkg-config
+  - vlan
+  - bridge-utils
+  - ebtables
+
+bootcmd:
+  # Enable root access
+  - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+  - service sshd restart
+output:
+  all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+  # Create swap
+  - fallocate -l 16G /swapfile
+  - chmod 600 /swapfile
+  - mkswap /swapfile
+  - swapon /swapfile
+  - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+  - find /etc/network/interfaces.d/ -type f -delete
+  - kill $(pidof /sbin/dhclient) || /bin/true
+  - ip a flush dev ens3
+  - ip a flush dev ens4
+  - rm -f /var/run/network/ifstate.ens3
+  - rm -f /var/run/network/ifstate.ens4
+  - ip route delete default || /bin/true
+  - ifup ens3
+  - ifup ens4
+
+write_files:
+  - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+    content: |
+      GRUB_RECORDFAIL_TIMEOUT=30
+      GRUB_TIMEOUT=3
+      GRUB_TIMEOUT_STYLE=menu
+
+  - path: /etc/network/interfaces
+    content: |
+      auto ens3
+      iface ens3 inet static
+      address $management_static_ip
+      netmask 255.255.254.0
+      gateway $management_gw
+      dns-nameservers $dnsaddress
+
+      auto ens4
+      iface ens4 inet static
+      address $control_static_ip
+      netmask 255.255.254.0
+
+  - path: /etc/bash_completion.d/fuel_devops30_activate
+    content: |
+      source /home/jenkins/fuel-devops30/bin/activate
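
The runcmd block above deliberately kills dhclient and flushes ens3/ens4 before the static scheme from /etc/network/interfaces takes over. A quick post-boot sanity check on the foundation node (a sketch):

    ip -4 addr show dev ens3   # expect only $management_static_ip
    ip -4 addr show dev ens4   # expect only $control_static_ip
    ip route show default      # default via $management_gw on ens3
    swapon --show              # the 16G /swapfile should be active
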
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml
new file mode 100644
index 0000000..bb6338c
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay-userdata.yaml
@@ -0,0 +1,81 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+
+disable_root: false
+chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+   - if lvs vg0; then pvresize $(pvdisplay -C -S vg_name=vg0 -o pv_name --noheadings | tail -n1); fi
+   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo resolvconf -u
+   #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /usr/share/growlvm/image-layout.yml
+     content: |
+       root:
+         size: '65%VG'
+       home:
+         size: '1%VG'
+       var_log:
+         size: '10%VG'
+       var_log_audit:
+         size: '5%VG'
+       var_tmp:
+         size: '10%VG'
+       tmp:
+         size: '5%VG'
+     owner: root:root
+
+growpart:
+    mode: auto
+    devices:
+      - '/'
+      - '/dev/vda3'
+      - '/dev/vdb3'
+      - '/dev/vdc3'
+      - '/dev/vdd3'
+    ignore_growroot_disabled: false
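
growpart first extends the partitions listed above; the runcmd then resizes the vg0 physical volume and lets growlvm.py re-balance the logical volumes to the percentages in image-layout.yml. A quick way to confirm the result after boot (a sketch):

    pvs                                   # the PV should span the grown partition
    lvs vg0 -o lv_name,lv_size            # sizes should follow image-layout.yml
    df -h / /home /var/log /var/tmp /tmp  # mounted filesystems resized accordingly
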
diff --git a/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
new file mode 100644
index 0000000..da17023
--- /dev/null
+++ b/tcp_tests/templates/bm-b300-cicd-queens-ovs-maas/underlay.hot
@@ -0,0 +1,143 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for bm-b300-cicd-queens-ovs-maas
+
+parameters:
+  instance_domain:
+    type: string
+    default: bm-b300-cicd-queens-ovs-maas.local
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+  key_pair:
+    type: string
+  cfg_flavor:
+    type: string
+  foundation_image:
+    type: string
+  foundation_flavor:
+    type: string
+  bm_availability_zone:
+    type: string
+  control_subnet_cidr:
+    type: string
+    default: "10.167.11.0/23"
+  tenant_subnet_cidr:
+    type: string
+    default: "10.167.12.0/23"
+  external_subnet_cidr:
+    type: string
+    default: "172.17.42.0/26"
+  management_subnet_cidr:
+    type: string
+    default: "172.16.180.0/23"
+  management_subnet_cfg01_ip:
+    type: string
+    default: 172.16.180.2
+  management_subnet_gateway_ip:
+    type: string
+    default: 172.16.180.1
+  management_subnet_pool_start:
+    type: string
+    default: 172.16.180.3
+  management_subnet_pool_end:
+    type: string
+    default: 172.16.180.61
+  salt_master_control_ip:
+    type: string
+    default: 10.167.11.5
+  deploy_empty_node:
+    type: boolean
+    default: False
+
+resources:
+  subnets:
+    type: MCP::Subnets
+    properties:
+      stack_name: { get_param: "OS::stack_name" }
+      env_name: { get_param: env_name }
+      management_net: 'system-phys-2401'
+      control_net: 'system-phys-2404'
+      tenant_net: 'system-phys-2406'
+      external_net: 'system-phys-2403'
+      control_subnet_cidr: { get_param: control_subnet_cidr }
+      tenant_subnet_cidr: { get_param: tenant_subnet_cidr }
+      external_subnet_cidr: { get_param: external_subnet_cidr }
+      management_subnet_cidr: { get_param: management_subnet_cidr }
+      management_subnet_gateway_ip: { get_param: management_subnet_gateway_ip }
+      management_subnet_pool_start: { get_param: management_subnet_pool_start }
+      management_subnet_pool_end: { get_param: management_subnet_pool_end }
+
+  #flavors:
+  #  type: MCP::Flavors
+
+  cfg01_node:
+    type: MCP::MasterNode
+    depends_on: [subnets]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      cfg01_flavor: re.jenkins.slave.large
+      availability_zone: { get_param: bm_availability_zone }
+      management_net: 'system-phys-2401'
+      control_net: 'system-phys-2404'
+      tenant_net: 'system-phys-2406'
+      external_net: 'system-phys-2403'
+      salt_master_control_ip: { get_param: salt_master_control_ip }
+      management_subnet_cfg01_ip: { get_param: management_subnet_cfg01_ip }
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '5' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '5' ]
+      instance_name: cfg01
+      instance_domain: {get_param: instance_domain}
+
+  foundation_node:
+    type: MCP::FoundationNode
+    depends_on: [subnets]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: foundation
+      availability_zone: { get_param: bm_availability_zone }
+      management_net: 'system-phys-2401'
+      control_net: 'system-phys-2404'
+      tenant_net: 'system-phys-2406'
+      external_net: 'system-phys-2403'
+      management_subnet_gateway_ip: { get_param: management_subnet_gateway_ip }
+      instance_image: { get_param: foundation_image }
+      instance_flavor: {get_param: foundation_flavor}
+      underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+      management_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, management_net_prefix] }, '251' ]
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '6' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '6' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '6' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+outputs:
+  foundation_public_ip:
+    description: foundation node IP address (management)
+    value:
+      get_attr:
+      - foundation_node
+      - instance_address
+...
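
MCP::Subnets, MCP::MasterNode and MCP::FoundationNode are custom resource types, so the template only deploys together with an environment file that maps them in the resource registry. A sketch of a manual launch (the environment file name and all parameter values are illustrative):

    openstack stack create \
      --template underlay.hot \
      --environment mcp-resource-registry.yaml \
      --parameter env_name=bm-b300-cicd-queens-ovs-maas \
      --parameter mcp_version=proposed \
      --parameter key_pair=my-keypair \
      --parameter cfg_flavor=re.jenkins.slave.large \
      --parameter foundation_image=ubuntu-16-04-x64 \
      --parameter foundation_flavor=re.jenkins.slave.large \
      --parameter bm_availability_zone=nova \
      bm-b300-cicd-queens-ovs-maas
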
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 42bf3e9..2499b71 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -21,9 +21,9 @@
   control_vlan: '2404'
 
   jenkins_pipelines_branch: 'release/2019.2.0'
-  deploy_network_gateway: 172.16.164.1
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.164.0/26
+  deploy_network_gateway: 172.16.180.1
+  deploy_network_netmask: 255.255.254.0
+  deploy_network_subnet: 172.16.180.0/23
   deployment_type: physical
   dns_server01: 172.18.176.6
   dns_server02: 172.18.224.6
@@ -32,22 +32,22 @@
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.11.241
-  infra_kvm01_deploy_address: 172.16.164.3
+  infra_kvm01_deploy_address: 172.16.180.3
   infra_kvm01_hostname: kvm01
   infra_kvm02_control_address: 10.167.11.242
-  infra_kvm02_deploy_address: 172.16.164.4
+  infra_kvm02_deploy_address: 172.16.180.4
   infra_kvm02_hostname: kvm02
   infra_kvm03_control_address: 10.167.11.243
-  infra_kvm03_deploy_address: 172.16.164.5
+  infra_kvm03_deploy_address: 172.16.180.5
   infra_kvm03_hostname: kvm03
   infra_kvm04_control_address: 10.167.11.244
-  infra_kvm04_deploy_address: 172.16.164.6
+  infra_kvm04_deploy_address: 172.16.180.6
   infra_kvm04_hostname: kvm04
   infra_kvm05_control_address: 10.167.11.245
-  infra_kvm05_deploy_address: 172.16.164.7
+  infra_kvm05_deploy_address: 172.16.180.7
   infra_kvm05_hostname: kvm05
   infra_kvm06_control_address: 10.167.11.246
-  infra_kvm06_deploy_address: 172.16.164.8
+  infra_kvm06_deploy_address: 172.16.180.8
   infra_kvm06_hostname: kvm06
   infra_kvm_vip_address: 10.167.11.240
   infra_primary_first_nic: eth1
@@ -55,11 +55,11 @@
   kubernetes_enabled: 'False'
   local_repositories: 'False'
   maas_enabled: 'True'
-  maas_deploy_address: 172.16.164.2
-  maas_deploy_cidr: 172.16.164.0/26
-  maas_deploy_gateway: 172.16.164.1
-  maas_deploy_range_end: 172.16.164.61
-  maas_deploy_range_start: 172.16.164.18
+  maas_deploy_address: 172.16.180.2
+  maas_deploy_cidr: 172.16.180.0/23
+  maas_deploy_gateway: 172.16.180.1
+  maas_deploy_range_end: 172.16.180.61
+  maas_deploy_range_start: 172.16.180.18
   maas_dhcp_enabled: 'True'
   maas_fabric_name: fabric-0
   maas_hostname: cfg01
@@ -74,7 +74,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -111,7 +111,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -148,7 +148,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -186,7 +186,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -223,7 +223,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -260,7 +260,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -297,7 +297,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:84"
               mode: "static"
-              ip: "172.16.164.9"
+              ip: "172.16.180.9"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -334,7 +334,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:82"
               mode: "static"
-              ip: "172.16.164.10"
+              ip: "172.16.180.10"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -371,7 +371,7 @@
             one1:
               mac: "0c:c4:7a:6c:bc:f6"
               mode: "static"
-              ip: "172.16.164.11"
+              ip: "172.16.180.11"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -408,7 +408,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -445,7 +445,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -482,7 +482,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -519,7 +519,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -556,7 +556,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -597,7 +597,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -639,7 +639,7 @@
   openstack_compute_count: '3'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
-  openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+  openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
   openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
   openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
   openstack_control_address: 10.167.11.10
@@ -659,9 +659,9 @@
   openstack_database_node03_address: 10.167.11.53
   openstack_database_node03_hostname: dbs03
   openstack_enabled: 'True'
-  openstack_gateway_node01_deploy_address: 172.16.164.9
-  openstack_gateway_node02_deploy_address: 172.16.164.10
-  openstack_gateway_node03_deploy_address: 172.16.164.11
+  openstack_gateway_node01_deploy_address: 172.16.180.9
+  openstack_gateway_node02_deploy_address: 172.16.180.10
+  openstack_gateway_node03_deploy_address: 172.16.180.11
   openstack_gateway_node01_address: 10.167.11.224
   openstack_gateway_node01_hostname: gtw01
   openstack_gateway_node02_hostname: gtw02
@@ -708,7 +708,7 @@
   salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
   salt_master_address: 10.167.11.5
   salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.164.2
+  salt_master_management_address: 172.16.180.2
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.11.60
   stacklight_log_hostname: log
@@ -756,7 +756,7 @@
   ceph_public_network_allocation: storage
   ceph_cluster_network: "10.167.11.0/24"
   ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
-  ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+  ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
   ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
   ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
   ceph_osd_data_disks: "/dev/sdb"
@@ -850,6 +850,6 @@
   stacklight_ssl_enabled: 'True'
 
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
index c9fd9d6..617b1fa 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay--user-data-foundation.yaml
@@ -68,7 +68,7 @@
       auto ens3
       iface ens3 inet static
       address $management_static_ip
-      netmask 255.255.255.192
+      netmask 255.255.254.0
       gateway $management_gw
       dns-nameservers $dnsaddress
 
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
index 0d37b6f..f40da29 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
@@ -33,19 +33,19 @@
     default: "172.17.42.0/26"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.167.11.5
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index e2f4d14..81d3bf7 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -36,9 +36,9 @@
   control_vlan: '2404'
 
   jenkins_pipelines_branch: 'release/2019.2.0'
-  deploy_network_gateway: 172.16.164.1
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.164.0/26
+  deploy_network_gateway: 172.16.180.1
+  deploy_network_netmask: 255.255.254.0
+  deploy_network_subnet: 172.16.180.0/23
   deployment_type: physical
   dns_server01: 172.18.176.6
   dns_server02: 172.18.224.6
@@ -47,22 +47,22 @@
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.11.241
-  infra_kvm01_deploy_address: 172.16.164.3
+  infra_kvm01_deploy_address: 172.16.180.3
   infra_kvm01_hostname: kvm01
   infra_kvm02_control_address: 10.167.11.242
-  infra_kvm02_deploy_address: 172.16.164.4
+  infra_kvm02_deploy_address: 172.16.180.4
   infra_kvm02_hostname: kvm02
   infra_kvm03_control_address: 10.167.11.243
-  infra_kvm03_deploy_address: 172.16.164.5
+  infra_kvm03_deploy_address: 172.16.180.5
   infra_kvm03_hostname: kvm03
   infra_kvm04_control_address: 10.167.11.244
-  infra_kvm04_deploy_address: 172.16.164.6
+  infra_kvm04_deploy_address: 172.16.180.6
   infra_kvm04_hostname: kvm04
   infra_kvm05_control_address: 10.167.11.245
-  infra_kvm05_deploy_address: 172.16.164.7
+  infra_kvm05_deploy_address: 172.16.180.7
   infra_kvm05_hostname: kvm05
   infra_kvm06_control_address: 10.167.11.246
-  infra_kvm06_deploy_address: 172.16.164.8
+  infra_kvm06_deploy_address: 172.16.180.8
   infra_kvm06_hostname: kvm06
   infra_kvm_vip_address: 10.167.11.240
   infra_primary_first_nic: eth1
@@ -70,11 +70,11 @@
   kubernetes_enabled: 'False'
   local_repositories: 'False'
   maas_enabled: 'True'
-  maas_deploy_address: 172.16.164.2
-  maas_deploy_cidr: 172.16.164.0/26
-  maas_deploy_gateway: 172.16.164.1
-  maas_deploy_range_end: 172.16.164.62
-  maas_deploy_range_start: 172.16.164.18
+  maas_deploy_address: 172.16.180.2
+  maas_deploy_cidr: 172.16.180.0/23
+  maas_deploy_gateway: 172.16.180.1
+  maas_deploy_range_end: 172.16.180.62
+  maas_deploy_range_start: 172.16.180.18
   maas_dhcp_enabled: 'True'
   maas_fabric_name: fabric-0
   maas_hostname: cfg01
@@ -89,7 +89,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -126,7 +126,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -163,7 +163,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -201,7 +201,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -238,7 +238,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -275,7 +275,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -312,7 +312,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:84"
               mode: "static"
-              ip: "172.16.164.9"
+              ip: "172.16.180.9"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -349,7 +349,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:82"
               mode: "static"
-              ip: "172.16.164.10"
+              ip: "172.16.180.10"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -386,7 +386,7 @@
             one1:
               mac: "0c:c4:7a:6c:bc:f6"
               mode: "static"
-              ip: "172.16.164.11"
+              ip: "172.16.180.11"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -423,7 +423,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -460,7 +460,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -497,7 +497,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -534,7 +534,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -571,7 +571,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -612,7 +612,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -654,7 +654,7 @@
   openstack_compute_count: '3'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
-  openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+  openstack_compute_deploy_address_ranges: 172.16.180.15-172.16.180.17
   openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
   openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
   openstack_control_address: 10.167.11.10
@@ -674,9 +674,9 @@
   openstack_database_node03_address: 10.167.11.53
   openstack_database_node03_hostname: dbs03
   openstack_enabled: 'True'
-  openstack_gateway_node01_deploy_address: 172.16.164.9
-  openstack_gateway_node02_deploy_address: 172.16.164.10
-  openstack_gateway_node03_deploy_address: 172.16.164.11
+  openstack_gateway_node01_deploy_address: 172.16.180.9
+  openstack_gateway_node02_deploy_address: 172.16.180.10
+  openstack_gateway_node03_deploy_address: 172.16.180.11
   openstack_gateway_node01_address: 10.167.11.224
   openstack_gateway_node01_hostname: gtw01
   openstack_gateway_node02_hostname: gtw02
@@ -722,7 +722,7 @@
   salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
   salt_master_address: 10.167.11.5
   salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.164.2
+  salt_master_management_address: 172.16.180.2
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.11.60
   stacklight_log_hostname: log
@@ -779,7 +779,7 @@
   ceph_public_network_allocation: storage
   ceph_cluster_network: "10.167.11.0/24"
   ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
-  ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
+  ceph_osd_deploy_address_ranges: "172.16.180.8-172.16.180.10"
   ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
   ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
 
@@ -816,6 +816,7 @@
   openstack_mysql_x509_enabled: 'True'
   rabbitmq_ssl_enabled: 'True'
   openstack_rabbitmq_x509_enabled: 'True'
+  openstack_rabbitmq_standalone_mode: 'True'
   openstack_internal_protocol: 'https'
   tenant_telemetry_enabled: 'True'
   gnocchi_aggregation_storage: ceph
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
index c9fd9d6..617b1fa 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay--user-data-foundation.yaml
@@ -68,7 +68,7 @@
       auto ens3
       iface ens3 inet static
       address $management_static_ip
-      netmask 255.255.255.192
+      netmask 255.255.254.0
       gateway $management_gw
       dns-nameservers $dnsaddress
 
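Note on the renumbering above: 255.255.254.0 is the dotted-quad form of the new /23 management CIDR, just as 255.255.255.192 was for the old /26. A quick sanity check with Python's standard ipaddress module:

    import ipaddress

    net = ipaddress.ip_network("172.16.180.0/23")
    print(net.netmask)        # 255.255.254.0
    print(net.num_addresses)  # 512 addresses, up from 64 in the old /26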
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
index eac31bf..4306ae5 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
@@ -33,19 +33,19 @@
     default: "172.17.42.0/26"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.167.11.5
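The Heat template defaults above move to the same /23; the new gateway, cfg01 address, and DHCP pool bounds all sit inside it, which a couple of lines can confirm:

    import ipaddress

    net = ipaddress.ip_network("172.16.180.0/23")
    assert all(ipaddress.ip_address(ip) in net
               for ip in ("172.16.180.1", "172.16.180.2",
                          "172.16.180.3", "172.16.180.61"))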
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
index a392ae7..c09d859 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-odl-maas/salt-context-cookiecutter-openstack_odl.yaml
@@ -435,6 +435,6 @@
   stacklight_ssl_enabled: 'True'
 
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
\ No newline at end of file
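The hardcoded credentials are replaced with %LOGIN%/%PASS% placeholders, so the real values have to be injected when the template is rendered. The exact mechanism isn't part of this change; a minimal sketch of the kind of substitution a CI wrapper might perform (the environment variable names are assumptions, not something from this repo):

    import os

    def render_secrets(text):
        # Hypothetical helper: pull credentials from the environment instead
        # of keeping them in the template itself.
        return (text
                .replace("%LOGIN%", os.environ["UPDATES_MIRANTIS_LOGIN"])
                .replace("%PASS%", os.environ["UPDATES_MIRANTIS_PASSWORD"]))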
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 74e0912..026932b 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -436,6 +436,6 @@
   stacklight_ssl_enabled: 'True'
 
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-b300-cicd-queens-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-b300-cicd-queens-ovs-maas.yaml
new file mode 100644
index 0000000..73b5ad6
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-b300-cicd-queens-ovs-maas.yaml
@@ -0,0 +1,72 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# See shared-salt.yaml for the other salt model repository parameters
+{% set LAB_CONFIG_NAME = 'bm-b300-cicd-queens-ovs-maas' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-b300-cicd-queens-ovs-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','salt-context-cookiecutter-openstack_ovs.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2404') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+{%- set CISCO_PASS = os_env('CISCO_PASS', 'cisco_pass') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+
+- description: Temporary WR to set correct bridge names according to the environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-baremetal/br\_baremetal/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+    salt '*' saltutil.refresh_pillar;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: Define username and password params for IPMI access
+  cmd: |
+    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+    sed -i 's/==CISCO_PASS==/${_param:cisco_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Add user/password for IPMI access"
+  cmd: |
+    set -e;
+    set -x;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
+    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+    reclass-tools add-key parameters._param.cisco_password {{ CISCO_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
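The two steps above first rewrite the ==IPMI_USER==/==IPMI_PASS== markers into reclass parameter references, then define those parameters once from the job environment, so the credentials end up under a single _param key. In miniature, the ${_param:...} indirection that reclass later resolves works like this (a simplified illustration, not reclass itself; values taken from the template defaults above):

    import re

    params = {"power_user": "mcp-qa", "power_password": "password"}

    def resolve(text):
        # Toy stand-in for reclass interpolation of ${_param:<key>} references
        return re.sub(r"\$\{_param:(\w+)\}", lambda m: params[m.group(1)], text)

    print(resolve("power_user: ${_param:power_user}"))  # power_user: mcp-qa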
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 50653c2..71c6fe5 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -136,7 +136,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -173,7 +173,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -210,7 +210,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -248,7 +248,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -285,7 +285,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -322,7 +322,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -359,7 +359,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:84"
             #              mode: "static"
-            #              ip: "172.16.164.9"
+            #              ip: "172.16.180.9"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -396,7 +396,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:82"
             #              mode: "static"
-            #              ip: "172.16.164.10"
+            #              ip: "172.16.180.10"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -433,7 +433,7 @@
             #            one1:
             #              mac: "0c:c4:7a:6c:bc:f6"
             #              mode: "static"
-            #              ip: "172.16.164.11"
+            #              ip: "172.16.180.11"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -470,7 +470,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -507,7 +507,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -544,7 +544,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -581,7 +581,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -618,7 +618,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -659,7 +659,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
index e11335f..8b01d59 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
@@ -35,19 +35,19 @@
     default: "10.9.0.0/24"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
index d08c4c8..adac30b 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -136,7 +136,7 @@
             one1:
               mac: "0c:c4:7a:33:24:be"
               mode: "static"
-              ip: "172.16.164.3"
+              ip: "172.16.180.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -173,7 +173,7 @@
             one1:
               mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
-              ip: "172.16.164.4"
+              ip: "172.16.180.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -210,7 +210,7 @@
             one1:
               mac: "0c:c4:7a:69:a0:4c"
               mode: "static"
-              ip: "172.16.164.5"
+              ip: "172.16.180.5"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -248,7 +248,7 @@
             one1:
               mac: "0c:c4:7a:6c:83:5c"
               mode: "static"
-              ip: "172.16.164.6"
+              ip: "172.16.180.6"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -285,7 +285,7 @@
             one1:
               mac: "0c:c4:7a:6c:88:d6"
               mode: "static"
-              ip: "172.16.164.7"
+              ip: "172.16.180.7"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -322,7 +322,7 @@
             one1:
               mac: "0c:c4:7a:aa:df:ac"
               mode: "static"
-              ip: "172.16.164.8"
+              ip: "172.16.180.8"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -359,7 +359,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:84"
             #              mode: "static"
-            #              ip: "172.16.164.9"
+            #              ip: "172.16.180.9"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -396,7 +396,7 @@
             #            one1:
             #              mac: "0c:c4:7a:aa:d5:82"
             #              mode: "static"
-            #              ip: "172.16.164.10"
+            #              ip: "172.16.180.10"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -433,7 +433,7 @@
             #            one1:
             #              mac: "0c:c4:7a:6c:bc:f6"
             #              mode: "static"
-            #              ip: "172.16.164.11"
+            #              ip: "172.16.180.11"
             #              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
             #              gateway: ${_param:deploy_network_gateway}
             #              name: one1
@@ -470,7 +470,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:02"
               mode: "static"
-              ip: "172.16.164.12"
+              ip: "172.16.180.12"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -507,7 +507,7 @@
             one1:
               mac: "0c:c4:7a:aa:d5:60"
               mode: "static"
-              ip: "172.16.164.13"
+              ip: "172.16.180.13"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -544,7 +544,7 @@
             one1:
               mac: "0c:c4:7a:aa:c9:3a"
               mode: "static"
-              ip: "172.16.164.14"
+              ip: "172.16.180.14"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -581,7 +581,7 @@
             one1:
               mac: "0c:c4:7a:aa:d6:aa"
               mode: "static"
-              ip: "172.16.164.15"
+              ip: "172.16.180.15"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -618,7 +618,7 @@
             one1:
               mac: "0c:c4:7a:aa:ce:30"
               mode: "static"
-              ip: "172.16.164.16"
+              ip: "172.16.180.16"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
@@ -659,7 +659,7 @@
             one1:
               mac: "0c:c4:7a:aa:e0:ce"
               mode: "static"
-              ip: "172.16.164.17"
+              ip: "172.16.180.17"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
               name: one1
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
index b02b758..8bc2a84 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
@@ -35,19 +35,19 @@
     default: "10.9.0.0/24"
   management_subnet_cidr:
     type: string
-    default: "172.16.164.0/26"
+    default: "172.16.180.0/23"
   management_subnet_cfg01_ip:
     type: string
-    default: 172.16.164.2
+    default: 172.16.180.2
   management_subnet_gateway_ip:
     type: string
-    default: 172.16.164.1
+    default: 172.16.180.1
   management_subnet_pool_start:
     type: string
-    default: 172.16.164.3
+    default: 172.16.180.3
   management_subnet_pool_end:
     type: string
-    default: 172.16.164.61
+    default: 172.16.180.61
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
index 58fdf66..08076e4 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
@@ -374,6 +374,6 @@
     change_password_upon_first_use: False
 
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 3f99b08..6449796 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -310,6 +310,6 @@
     change_password_upon_first_use: False
 
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index b0cf6e8..d8978f0 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -316,6 +316,6 @@
     change_password_upon_first_use: False
 
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index c26143e..6e575b1 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -293,6 +293,7 @@
   galera_ssl_enabled: 'True'
   openstack_mysql_x509_enabled: 'True'
   rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_standalone_mode: 'True'
   openstack_rabbitmq_x509_enabled: 'True'
   openstack_internal_protocol: 'https'
   openstack_create_public_network: 'True'
@@ -379,6 +380,6 @@
 
   stacklight_ssl_enabled: 'True'
   # Enable Mirantis repo with CVE fixes for xenial
-  updates_mirantis_login: "root"
-  updates_mirantis_password: "r00tme"
+  updates_mirantis_login: "%LOGIN%"
+  updates_mirantis_password: "%PASS%"
   updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/tests/system/test_backup_restore_galera.py b/tcp_tests/tests/system/test_backup_restore_galera.py
index 49f6234..573756a 100644
--- a/tcp_tests/tests/system/test_backup_restore_galera.py
+++ b/tcp_tests/tests/system/test_backup_restore_galera.py
@@ -77,21 +77,18 @@
         Scenario:
             1. Create flavor to be backuped
             2. Run job galera_backup_database
-            3. Run tests with cvp-sanity job
-            4. Run tests with cvp-tempest job
-            5. Create flavor not to be restored
-            6. Run job galera_verify_restore
-            7. If jobs are passed then start tests with cvp-sanity job
-            8. Run tests with cvp-tempest job
+            3. Create flavor not to be restored
+            4. Run job galera_verify_restore
+
         """
         dt = drivetrain_actions
-        salt = salt_actions
 
         show_step(1)
-        cfg_node = self._get_cfg_fqn(salt)
+        ctl_node = underlay_actions.get_target_node_names(target='ctl')[0]
+        print(ctl_node)
         fixture_flavor1 = 'test1'
         fixture_flavor2 = 'test2'
-        self.create_flavor(underlay_actions, fixture_flavor1, cfg_node)
+        self.create_flavor(underlay_actions, fixture_flavor1, ctl_node)
         # ################## Run backup job #########################
         show_step(2)
         job_name = 'galera_backup_database'
@@ -104,45 +101,10 @@
 
         assert job_result == 'SUCCESS', job_description
 
-        # ######################## Run CPV ###########################
         show_step(3)
-        job_name = 'cvp-sanity'
-        job_cvp_sanity_parameters = {
-            'EXTRA_PARAMS': '''
-
-                envs:
-                  - skipped_packages='{},{},{},{},{},{}'
-                  - skipped_modules='xunitmerge,setuptools'
-                  - skipped_services='docker,containerd'
-                  - ntp_skipped_nodes=''
-                  - tests_set=-k "not {} and not {} and not {}"
-            '''.format('python-setuptools', 'python-pkg-resources',
-                       'xunitmerge', 'python-gnocchiclient',
-                       'python-ujson', 'python-octaviaclient',
-                       'test_ceph_status', 'test_prometheus_alert_count',
-                       'test_uncommited_changes')
-        }
-        job_result, job_description = dt.start_job_on_jenkins(
-            job_name=job_name,
-            job_parameters=job_cvp_sanity_parameters)
-
-        assert job_result == 'SUCCESS', job_description
-
-        # ######################## Run Tempest ###########################
-        show_step(4)
-        job_name = 'cvp-tempest'
-        job_parameters = {
-             'TEMPEST_ENDPOINT_TYPE': 'internalURL'
-        }
-        job_result, job_description = dt.start_job_on_jenkins(
-            job_name=job_name,
-            job_parameters=job_parameters)
-
-        assert job_result == 'SUCCESS', job_description
-        show_step(5)
-        self.create_flavor(underlay_actions, fixture_flavor2, cfg_node)
+        self.create_flavor(underlay_actions, fixture_flavor2, ctl_node)
         # ######################## Run Restore ###########################
-        show_step(6)
+        show_step(4)
         job_name = 'galera_verify_restore'
         job_parameters = {
              'RESTORE_TYPE': 'ONLY_RESTORE',
@@ -156,28 +118,8 @@
 
         assert self.is_flavor_restored(underlay_actions,
                                        fixture_flavor1,
-                                       cfg_node)
+                                       ctl_node)
         assert self.is_flavor_restored(underlay_actions,
                                        fixture_flavor2,
-                                       cfg_node) is False
-        # ######################## Run CPV ###########################
-        show_step(7)
-
-        job_name = 'cvp-sanity'
-        job_result, job_description = dt.start_job_on_jenkins(
-            job_name=job_name,
-            job_parameters=job_cvp_sanity_parameters)
-
-        assert job_result == 'SUCCESS', job_description
-        # ######################## Run Tempest ###########################
-        show_step(8)
-        job_name = 'cvp-tempest'
-        job_parameters = {
-             'TEMPEST_ENDPOINT_TYPE': 'internalURL'
-        }
-        job_result, job_description = dt.start_job_on_jenkins(
-            job_name=job_name,
-            job_parameters=job_parameters)
-
-        assert job_result == 'SUCCESS', job_description
-        self.delete_flavor(underlay_actions, fixture_flavor1, cfg_node)
+                                       ctl_node) is False
+        self.delete_flavor(underlay_actions, fixture_flavor1, ctl_node)
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index 6f2fb52..b2f98b0 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -1,70 +1,10 @@
 import pytest
 
 from tcp_tests import logger
+from string import Template
 
 LOG = logger.logger
 
-xtra_network_interface = """
-parameters:
-  _param:
-    linux_network_interfaces:
-      br_ctl:
-        address: ${_param:single_address}
-        enabled: True
-        name_servers:
-        - ${_param:dns_server01}
-        - ${_param:dns_server02}
-        netmask: ${_param:control_network_netmask}
-        proto: static
-        require_interfaces: ['ens4']
-        type: bridge
-        use_interfaces: ['ens4']
-      ens3:
-        enabled: True
-        name: ens3
-        proto: dhcp
-        type: eth
-      ens4:
-        enabled: True
-        ipflush_onchange: True
-        name: ens4
-        proto: manual
-        type: eth
-"""
-
-add_osd_ceph_init_yml = """
-parameters:
-  _param:
-    ceph_osd_node04_hostname: xtra
-    ceph_osd_node04_address: 10.6.0.205
-    ceph_osd_system_codename: xenial
-  linux:
-    network:
-      host:
-        xtra:
-          address: ${_param:ceph_osd_node04_address}
-          names:
-          - ${_param:ceph_osd_node04_hostname}
-          - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
-        """
-
-add_osd_config_init_yml = """
-parameters:
-  reclass:
-    storage:
-      node:
-        ceph_osd_node04:
-          name: ${_param:ceph_osd_node04_hostname}
-          domain: ${_param:cluster_domain}
-          classes:
-          - cluster.${_param:cluster_name}.ceph.osd
-          params:
-            salt_master_host: ${_param:reclass_config_master}
-            linux_system_codename:  ${_param:ceph_osd_system_codename}
-            single_address: ${_param:ceph_osd_node04_address}
-            ceph_crush_parent: rack02
-"""
-
 
 @pytest.fixture(scope='session')
 def add_xtra_node_to_salt(salt_actions, underlay_actions,
@@ -80,27 +20,19 @@
     cfg_node = [node['node_name'] for node in config.underlay.ssh
                 if 'salt_master' in node.get('roles')][0]
 
-    salt_actions.enforce_state("*", "reclass")
-    reclass_actions.add_class(
-        "environment.heat-cicd-queens-dvr-sl.linux_network_interface",
-        short_path="../nodes/_generated/xtra.*.yml")
-    reclass_actions.add_class("environment.heat-cicd-queens-dvr-sl.overrides",
-                              short_path="../nodes/_generated/xtra.*.yml")
-    reclass_actions.merge_context(yaml_context=xtra_network_interface,
-                                  short_path="../nodes/_generated/xtra.*.yml")
+    # salt_actions.enforce_state("I@salt:master", "reclass")
 
     underlay_actions.check_call(
             "salt-key -a {node} --include-all -y".format(node=xtra_node),
             node_name=cfg_node,
             raise_on_err=False)
     # Need to restart salt-minion service after accepting it in Salt Master
-    underlay_actions.check_call(
-        "systemctl restart salt-minion",
-        node_name=xtra_node,
-        raise_on_err=False)
-    salt_actions.enforce_state("I@salt:master", "reclass")
-    salt_actions.enforce_state("xtra*", "linux")
-    salt_actions.enforce_state("xtra*", "openssh")
+    # underlay_actions.check_call(
+    #     "systemctl restart salt-minion",
+    #     node_name=xtra_node,
+    #     raise_on_err=False)
+    # salt_actions.enforce_state("xtra*", "linux")
+    # salt_actions.enforce_state("xtra*", "openssh")
 
     yield
 
@@ -114,12 +46,71 @@
 @pytest.fixture(scope='session')
 def wa_prod36167(reclass_actions):
     reclass_actions.delete_class("system.salt.control.virt",
-                                 "classes/cluster/*/infra/kvm.yml")
+                                 "cluster/*/infra/kvm.yml")
 
 
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+                         "wa_prod36167")
 class TestCephOsd(object):
 
+    add_osd_ceph_init_yml = """
+    parameters:
+      _param:
+        ceph_osd_node04_hostname: xtra
+        ceph_osd_node04_address: 10.6.0.205
+        ceph_osd_system_codename: xenial
+      linux:
+        network:
+          host:
+            xtra:
+              address: ${_param:ceph_osd_node04_address}
+              names:
+              - ${_param:ceph_osd_node04_hostname}
+              - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
+            """
+
+    add_osd_config_init_yml = """
+    parameters:
+      reclass:
+        storage:
+          node:
+            ceph_osd_node04:
+              name: ${_param:ceph_osd_node04_hostname}
+              domain: ${_param:cluster_domain}
+              classes:
+              - cluster.${_param:cluster_name}.ceph.osd
+              - environment.heat-cicd-queens-dvr-sl.linux_network_interface
+              - environment.heat-cicd-queens-dvr-sl.overrides
+              params:
+                salt_master_host: ${_param:reclass_config_master}
+                linux_system_codename:  ${_param:ceph_osd_system_codename}
+                single_address: ${_param:ceph_osd_node04_address}
+                ceph_crush_parent: rack02
+                linux_network_interfaces:
+                  br_ctl:
+                    address: ${_param:ceph_osd_node04_address}
+                    enabled: True
+                    name_servers:
+                    - ${_param:dns_server01}
+                    - ${_param:dns_server02}
+                    netmask: ${_param:control_network_netmask}
+                    proto: static
+                    require_interfaces: ['ens4']
+                    type: bridge
+                    use_interfaces: ['ens4']
+                  ens3:
+                    enabled: True
+                    name: ens3
+                    proto: dhcp
+                    type: eth
+                  ens4:
+                    enabled: True
+                    ipflush_onchange: True
+                    name: ens4
+                    proto: manual
+                    type: eth
+    """
+
     @pytest.fixture
     def describe_node_in_reclass(self,
                                  reclass_actions,
@@ -127,14 +118,17 @@
         LOG.info("Executing pytest SETUP "
                  "from describe_node_in_reclass fixture")
         reclass = reclass_actions
+
         # ---- cluster/*/ceph/init.yml ---------------
-        reclass.merge_context(yaml_context=add_osd_ceph_init_yml,
+        reclass.merge_context(yaml_context=self.add_osd_ceph_init_yml,
                               short_path="cluster/*/ceph/init.yml")
 
         # ------- cluster/infra/config/init.yml -----------
-        reclass.merge_context(yaml_context=add_osd_config_init_yml,
-                              short_path="cluster/*/infra/config/init.yml")
-        salt_actions.run_state("*", "saltutil.refresh_pillar")
+        reclass.merge_context(yaml_context=build_node_config('osd'),
+                              short_path="cluster/*/infra/config/nodes.yml")
+
+        # salt_actions.run_state("*", "saltutil.refresh_pillar")
+        # salt_actions.enforce_state("I@salt:master", "reclass")
 
     @pytest.fixture
     def remove_node_from_reclass(self,
@@ -143,7 +137,7 @@
 
         reclass.delete_key(
             key="parameters.reclass.storage.node.ceph_osd_node04",
-            short_path="cluster/*/infra/config/init.yml"
+            short_path="cluster/*/infra/config/nodes.yml"
             )
         reclass.delete_key(
             key="parameters.linux.network.host.xtra",
@@ -211,76 +205,36 @@
         assert job_result == 'SUCCESS', job_description
 
 
-add_mon_ceph_init_yml = """
-parameters:
-  _param:
-    ceph_mon_node04_hostname: xtra
-    ceph_mon_node04_address: 10.6.0.205
-    ceph_mon_node04_ceph_public_address: 10.166.49.209
-    ceph_mon_node04_ceph_backup_hour: 4
-    ceph_mon_node04_ceph_backup_minute: 0
-  linux:
-    network:
-      host:
-        xtra:
-          address: ${_param:ceph_mon_node04_address}
-          names:
-          - ${_param:ceph_mon_node04_hostname}
-          - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
-"""
-
-add_mon_ceph_common_yml = """
-parameters:
-  ceph:
-    common:
-      members:
-        - name: ${_param:ceph_mon_node04_hostname}
-          host: ${_param:ceph_mon_node04_address}
-"""
-
-add_mon_config_node_yml = """
-parameters:
-  reclass:
-    storage:
-      node:
-        ceph_mon_node04:
-          name: ${_param:ceph_mon_node04_hostname}
-          domain: ${_param:cluster_domain}
-          classes:
-          - cluster.${_param:cluster_name}.ceph.mon
-          params:
-            ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
-            ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
-            ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
-            salt_master_host: ${_param:reclass_config_master}
-            linux_system_codename: ${_param:ceph_mon_system_codename}
-            single_address: ${_param:ceph_mon_node04_address}
-            keepalived_vip_priority: 104
-"""  # noqa: E501
-
-add_mon_infra_kvm_yml = """
-parameters:
-  salt:
-    control:
-      size:
-        ceph.mon:
-          cpu: 8
-          ram: 16384
-          disk_profile: small
-          net_profile: default
-      cluster:
-        internal:
-          node:
-            cmn04:
-              name: ${_param:ceph_mon_node04_hostname}
-              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
-              image: ${_param:salt_control_xenial_image}
-              size: ceph.mon
-"""  # noqa: E501
-
-
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+                         "wa_prod36167")
 class TestCephMon(object):
+    add_mon_ceph_init_yml = """
+    parameters:
+      _param:
+        ceph_mon_node04_hostname: xtra
+        ceph_mon_node04_address: 10.6.0.205
+        ceph_mon_node04_ceph_public_address: 10.166.49.209
+        ceph_mon_node04_ceph_backup_hour: 4
+        ceph_mon_node04_ceph_backup_minute: 0
+      linux:
+        network:
+          host:
+            xtra:
+              address: ${_param:ceph_mon_node04_address}
+              names:
+              - ${_param:ceph_mon_node04_hostname}
+              - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
+    """
+
+    add_mon_ceph_common_yml = """
+    parameters:
+      ceph:
+        common:
+          members:
+            - name: ${_param:ceph_mon_node04_hostname}
+              host: ${_param:ceph_mon_node04_address}
+    """
+
     @pytest.fixture
     def describe_node_in_reclass(self,
                                  reclass_actions, salt_actions):
@@ -288,18 +242,20 @@
                  "from describe_node_in_reclass fixture")
         reclass = reclass_actions
         # ---- cluster/*/ceph/init.yml --------------
-        reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
+        reclass.merge_context(yaml_context=self.add_mon_ceph_init_yml,
                               short_path="cluster/*/ceph/init.yml")
 
         # ------- cluster/infra/config/init.yml -----------
-        reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
+        reclass.merge_context(yaml_context=self.add_mon_ceph_common_yml,
                               short_path="cluster/*/ceph/common.yml")
-        reclass.merge_context(yaml_context=add_mon_config_node_yml,
+        reclass.merge_context(yaml_context=build_node_config('mon'),
                               short_path="cluster/*/infra/config/nodes.yml")
 
         # ------- define settings for new mon node in KVM cluster -----------
-        reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
-                              short_path="cluster/*/infra/kvm.yml")
+        # Commented out because we don't add a VM here: the node is
+        # already deployed
+        # reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
+        #                       short_path="cluster/*/infra/kvm.yml")
 
         salt_actions.run_state("*", "saltutil.refresh_pillar")
 
@@ -311,7 +267,7 @@
         reclass = reclass_actions
         reclass.delete_key(
             key="parameters.reclass.storage.node.ceph_mon_node04",
-            short_path="cluster/*/infra/config/init.yml")
+            short_path="cluster/*/infra/config/nodes.yml")
         reclass.delete_key(
             key="parameters.salt.control.cluster.internal.node.cmn04",
             short_path="cluster/*/infra/kvm.yml"
@@ -360,67 +316,48 @@
         assert job_result == 'SUCCESS', job_description
 
 
-add_rgw_ceph_init_yml = """
-parameters:
-    _param:
-        ceph_rgw_node04_hostname: xtra
-        ceph_rgw_node04_address: 10.6.0.205
-        ceph_rgw_node04_ceph_public_address: 10.166.49.209
-    linux:
-        network:
-            host:
-                rgw04:
-                    address: ${_param:ceph_rgw_node04_address}
-                    names:
-                    - ${_param:ceph_rgw_node04_hostname}
-                    - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
-"""  # noqa: E501
-
-add_rgw_ceph_rgw_yml = """
-parameters:
-  _param:
-    cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
-    cluster_node04_address: ${_param:ceph_rgw_node04_address}
-  ceph:
-    common:
-      keyring:
-        rgw.xtra:
-          caps:
-            mon: "allow rw"
-            osd: "allow rwx"
-  haproxy:
-    proxy:
-      listen:
-        radosgw:
-          servers:
-            - name: ${_param:cluster_node04_hostname}
-              host: ${_param:cluster_node04_address}
-              port: ${_param:haproxy_radosgw_source_port}
-              params: check
-"""
-
-add_rgw_config_init_yml = """
-parameters:
-  reclass:
-    storage:
-      node:
-        ceph_rgw_node04:
-          name: ${_param:ceph_rgw_node04_hostname}
-          domain: ${_param:cluster_domain}
-          classes:
-          - cluster.${_param:cluster_name}.ceph.rgw
-          params:
-            salt_master_host: ${_param:reclass_config_master}
-            linux_system_codename:  ${_param:ceph_rgw_system_codename}
-            single_address: ${_param:ceph_rgw_node04_address}
-            deploy_address: ${_param:ceph_rgw_node04_deploy_address}
-            ceph_public_address: ${_param:ceph_rgw_node04_public_address}
-            keepalived_vip_priority: 104
-"""
-
-
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+                         "wa_prod36167")
 class TestCephRgw(object):
+    add_rgw_ceph_init_yml = """
+    parameters:
+        _param:
+            ceph_rgw_node04_hostname: xtra
+            ceph_rgw_node04_address: 10.6.0.205
+            ceph_rgw_node04_ceph_public_address: 10.166.49.209
+        linux:
+            network:
+                host:
+                    rgw04:
+                        address: ${_param:ceph_rgw_node04_address}
+                        names:
+                        - ${_param:ceph_rgw_node04_hostname}
+                        - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
+    """  # noqa: E501
+
+    add_rgw_ceph_rgw_yml = """
+    parameters:
+      _param:
+        cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
+        cluster_node04_address: ${_param:ceph_rgw_node04_address}
+      ceph:
+        common:
+          keyring:
+            rgw.xtra:
+              caps:
+                mon: "allow rw"
+                osd: "allow rwx"
+      haproxy:
+        proxy:
+          listen:
+            radosgw:
+              servers:
+                - name: ${_param:cluster_node04_hostname}
+                  host: ${_param:cluster_node04_address}
+                  port: ${_param:haproxy_radosgw_source_port}
+                  params: check
+    """
+
     @pytest.fixture
     def describe_node_in_reclass(self,
                                  reclass_actions, salt_actions):
@@ -428,14 +365,14 @@
                  "from describe_node_in_reclass fixture")
         reclass = reclass_actions
         # ---- cluster/*/ceph/init.yml --------------
-        reclass.merge_context(yaml_context=add_rgw_ceph_init_yml,
+        reclass.merge_context(yaml_context=self.add_rgw_ceph_init_yml,
                               short_path="cluster/*/ceph/init.yml")
 
-        reclass.merge_context(yaml_context=add_rgw_ceph_rgw_yml,
+        reclass.merge_context(yaml_context=self.add_rgw_ceph_rgw_yml,
                               short_path="cluster/*/ceph/rgw.yml")
 
-        reclass.merge_context(yaml_context=add_rgw_config_init_yml,
-                              short_path="cluster/*/infra/config/init.yml")
+        reclass.merge_context(yaml_context=build_node_config('rgw'),
+                              short_path="cluster/*/infra/config/nodes.yml")
 
         salt_actions.run_state("*", "saltutil.refresh_pillar")
 
@@ -492,10 +429,98 @@
         assert job_result == 'SUCCESS', job_description
 
 
-@pytest.mark.usefixtures("add_xtra_node_to_salt")
+@pytest.mark.usefixtures("add_xtra_node_to_salt",
+                         "wa_prod36167")
 class TestCephMgr(object):
     def test_add_node(self):
         pass
 
     def test_delete_node(self):
         pass
+
+
+def build_node_config(node=''):
+    """
+
+    :param node:  [osd, mon, rgw, mgr]
+    :return: string in yaml format
+    """
+
+    class _Template(Template):
+        delimiter = "#"
+        idpattern = '[A-Z]*'
+
+    template = _Template("""
+    parameters:
+      reclass:
+        storage:
+          node:
+            ceph_#NODE_node04:
+              name: ${_param:ceph_#NODE_node04_hostname}
+              domain: ${_param:cluster_domain}
+              classes:
+              - cluster.${_param:cluster_name}.ceph.#NODE
+              - environment.${_param:cluster_name}.linux_network_interface
+              - environment.${_param:cluster_name}.overrides
+              params:
+                salt_master_host: ${_param:reclass_config_master}
+                linux_system_codename: ${_param:ceph_#NODE_system_codename}
+                single_address: ${_param:ceph_#NODE_node04_address}
+                #OSDSETTINGS
+                #MONSETTINGS
+                #RGWSETTINGS
+                linux_network_interfaces:
+                  br_ctl:
+                    address: ${_param:ceph_#NODE_node04_address}
+                    enabled: True
+                    name_servers:
+                    - ${_param:dns_server01}
+                    - ${_param:dns_server02}
+                    netmask: ${_param:control_network_netmask}
+                    proto: static
+                    require_interfaces: ['ens4']
+                    type: bridge
+                    use_interfaces: ['ens4']
+                  ens3:
+                    enabled: True
+                    name: ens3
+                    proto: dhcp
+                    type: eth
+                  ens4:
+                    enabled: True
+                    ipflush_onchange: True
+                    name: ens4
+                    proto: manual
+                    type: eth
+    """)
+
+    data = {
+        'NODE': node,
+        'OSDSETTINGS': '',
+        'MONSETTINGS': '',
+        'RGWSETTINGS': '',
+
+    }
+    # ------------------ OSD-specific settings ------------------
+    if node == 'osd':
+        data['OSDSETTINGS'] = """
+                ceph_crush_parent: rack02
+                """
+    # ------------------ MON-specific settings ------------------
+    if node == 'mon':
+        data['MONSETTINGS'] = """
+                ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
+                ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
+                ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
+                keepalived_vip_priority: 104
+                """  # noqa: E501
+    # ------------------ RGW-specific settings ------------------
+    if node == 'rgw':
+        data['RGWSETTINGS'] = """
+                ceph_public_address: ${_param:ceph_rgw_node04_ceph_public_address}
+                keepalived_vip_priority: 104
+                """  # noqa: E501
+
+    yaml_config = template.substitute(data)
+
+    return yaml_config
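Switching the Template delimiter to '#' is what lets the reclass ${_param:...} references pass through substitution untouched, while the upper-case #PLACEHOLDER names get filled in. A self-contained sketch of the same trick (class name here is illustrative):

    from string import Template

    class HashTemplate(Template):   # same idea as _Template above
        delimiter = "#"             # '$' stays literal, so ${_param:...} survives
        idpattern = "[A-Z]*"        # placeholders are upper-case words

    line = HashTemplate("single_address: ${_param:ceph_#NODE_node04_address}")
    print(line.substitute(NODE="osd"))
    # single_address: ${_param:ceph_osd_node04_address}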
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index c1456be..45e3cdd 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -188,6 +188,7 @@
             2. Run job git-mirror-downstream-mk-pipelines
             3. Run job git-mirror-downstream-pipeline-library
             4. If jobs are passed then start 'Deploy - upgrade MCP Drivetrain'
+            5. Wait until salt-master completes all async jobs
 
         Duration: ~70 min
         """
@@ -248,6 +249,9 @@
             build_timeout=4 * 60 * 60)
 
         assert job_result == 'SUCCESS', job_description
+        # ############## Wait until salt-master completes all async jobs #####
+        show_step(5)
+        salt.wait_jobs_completed(timeout=20*60)
 
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
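wait_jobs_completed isn't shown in this diff; judging by the name and the 20-minute timeout, it polls the master until no asynchronous salt jobs remain. A rough sketch of that pattern, where the client call is an assumption about the framework's API rather than its real signature:

    import time

    def wait_jobs_completed(salt_client, timeout=20 * 60, interval=30):
        """Hypothetical sketch: poll until no async salt jobs are running.

        `salt_client.running()` stands in for whatever wrapper the test
        framework provides around salt's `saltutil.running`.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            if not salt_client.running():  # no jobs left on any minion
                return
            time.sleep(interval)
        raise TimeoutError("salt jobs still running after %s seconds" % timeout)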