Merge "Fix for k8s-contrail model"
diff --git a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
new file mode 100644
index 0000000..96ddf76
--- /dev/null
+++ b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
@@ -0,0 +1,105 @@
+@Library('tcp-qa')_
+
+def common = new com.mirantis.mk.Common()
+def shared = new com.mirantis.system_qa.SharedPipeline()
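+// Step list used for result reporting; note that DRIVETRAIN_STACK_INSTALL is
+// still included even though this pipeline does not deploy the drivetrain.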
+def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+
+currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
+
+def deploy(shared, common, steps) {
+    def report_text = ''
+    try {
+
+        stage("Clean the environment and clone tcp-qa") {
+            shared.prepare_working_dir()
+        }
+
+        stage("Create environment, generate model, bootstrap the salt-cluster") {
+            // steps: "hardware,create_model,salt"
+            shared.swarm_bootstrap_salt_cluster_devops()
+        }
+
+        stage("Deploy platform components from day01 Jenkins") {
+            // steps: env.PLATFORM_STACK_INSTALL
+            shared.swarm_deploy_platform_non_cicd(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
+        }
+
+        currentBuild.result = 'SUCCESS'
+
+    } catch (e) {
+        common.printMsg("Deploy is failed: " + e.message , "purple")
+        report_text = e.message
+        def snapshot_name = "deploy_failed"
+        shared.run_cmd("""\
+            dos.py suspend ${ENV_NAME} || true
+            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
+        """)
+        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+            shared.run_cmd("""\
+                dos.py resume ${ENV_NAME} || true
+            """)
+        }
+        shared.devops_snapshot_info(snapshot_name)
+        throw e
+    } finally {
+        shared.create_deploy_result_report(steps, currentBuild.result, report_text)
+    }
+}
+
+def test(shared, common, steps) {
+    try {
+        stage("Run tests") {
+            shared.swarm_run_pytest(steps)
+        }
+
+    } catch (e) {
+        common.printMsg("Tests are failed: " + e.message, "purple")
+        def snapshot_name = "tests_failed"
+        shared.run_cmd("""\
+            dos.py suspend ${ENV_NAME} || true
+            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
+        """)
+        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+            shared.run_cmd("""\
+                dos.py resume ${ENV_NAME} || true
+            """)
+        }
+        shared.devops_snapshot_info(snapshot_name)
+        throw e
+    }
+}
+
+// main
+// Temporarily disable throttling to check how the job runs
+//throttle(['fuel_devops_environment']) {
+  node ("${NODE_NAME}") {
+    try {
+        // run deploy stages
+        deploy(shared, common, steps)
+        // run test stages
+        test(shared, common, steps)
+    } catch (e) {
+        common.printMsg("Job is failed: " + e.message, "purple")
+        throw e
+    } finally {
+        // shutdown the environment if required
+        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+            shared.run_cmd("""\
+                dos.py destroy ${ENV_NAME} || true
+            """)
+        }
+
+        stage("Archive all xml reports") {
+            archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
+        }
+        stage("report results to testrail") {
+            shared.swarm_testrail_report(steps)
+        }
+        stage("Store TestRail reports to job description") {
+            String description = readFile("description.txt")
+            currentBuild.description += "\n${description}"
+        }
+
+    }
+  }
+//}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-create-cfg-config-drive.groovy b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
index a734235..03e72f1 100644
--- a/jobs/pipelines/swarm-create-cfg-config-drive.groovy
+++ b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
@@ -40,30 +40,35 @@
                           usernameVariable: "GERRIT_USERNAME",
                           passwordVariable: "GERRIT_PASSWORD"]]) {
 
-            dir("mcp-common-scripts-git") {
-                cloned = gerrit.gerritPatchsetCheckout([
-                    credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
-                    gerritBranch: "${MCP_VERSION}",
-                    gerritRefSpec: "${MCP_COMMON_SCRIPTS_REFS}",
-                    gerritScheme: "ssh",
-                    gerritName: "${GERRIT_USERNAME}",
-                    gerritHost: "gerrit.mcp.mirantis.net",
-                    gerritPort: "29418",
-                    gerritProject: "mcp/mcp-common-scripts"
-                ])
-            }
-            if (!cloned) {
-                error("Failed to clone the repository mcp/mcp-common-scripts")
-            }
-
             sh ("""\
                 set -ex
                 eval \$(ssh-agent)
                 ssh-add ${GERRIT_KEY}
+                git clone ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mcp/mcp-common-scripts mcp-common-scripts-git
                 git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mk/mk-pipelines mk-pipelines
                 git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mcp-ci/pipeline-library pipeline-library
             """)
 
+            if (COMMON_SCRIPTS_COMMIT != '') {
+                sh ("""\
+                    set -ex
+                    cd mcp-common-scripts-git
+                    git checkout ${COMMON_SCRIPTS_COMMIT}
+                    git log -1
+                """)
+            }
+
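+            // If a Gerrit refspec is provided, it is fetched and checked out
+            // after COMMON_SCRIPTS_COMMIT, so MCP_COMMON_SCRIPTS_REFS wins
+            // when both are set.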
+            if (MCP_COMMON_SCRIPTS_REFS != '') {
+                sh ("""\
+                    set -ex
+                    eval \$(ssh-agent)
+                    ssh-add ${GERRIT_KEY}
+                    cd mcp-common-scripts-git
+                    git fetch ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.com:29418/mcp/mcp-common-scripts ${MCP_COMMON_SCRIPTS_REFS} && git checkout FETCH_HEAD
+                    git log -1
+                """)
+            }
+
             if (PIPELINE_LIBRARY_REF != '') {
                 sh ("""\
                     set -ex
@@ -72,6 +77,8 @@
                     cd pipeline-library
                     git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF}
                     git tag ${MCP_VERSION} FETCH_HEAD -f
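+                    # Point the release/${MCP_VERSION} branch at the fetched
+                    # ref so jobs that default to release/<version> use it.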
+                    git branch -f release/${MCP_VERSION} FETCH_HEAD
+                    git log -1
                 """)
             }
             if (MK_PIPELINES_REF != '') {
@@ -82,49 +89,12 @@
                     cd mk-pipelines
                     git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF}
                     git tag ${MCP_VERSION} FETCH_HEAD -f
+                    git branch -f release/${MCP_VERSION} FETCH_HEAD
+                    git log -1
                 """)
             }
 
-            // dir("mk-pipelines-git") {
-            //     cloned = gerrit.gerritPatchsetCheckout([
-            //         credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
-            //         gerritRefSpec: "${MK_PIPELINES_REF}",
-            //         gerritScheme: "ssh",
-            //         gerritName: "${GERRIT_USERNAME}",
-            //         gerritHost: "gerrit.mcp.mirantis.net",
-            //         gerritPort: "29418",
-            //         gerritProject: "mk/mk-pipelines"
-            //     ])
-            // }
-            // if (!cloned) {
-            //     error("Failed to clone the repository mk/mk-pipelines")
-            // }
-
-            // dir("pipeline-library-git") {
-            //     cloned = gerrit.gerritPatchsetCheckout([
-            //         credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
-            //         gerritRefSpec: "${PIPELINE_LIBRARY_REF}",
-            //         gerritScheme: "ssh",
-            //         gerritName: "${GERRIT_USERNAME}",
-            //         gerritHost: "gerrit.mcp.mirantis.net",
-            //         gerritPort: "29418",
-            //         gerritProject: "mcp-ci/pipeline-library"
-            //     ])
-            // }
-            // if (!cloned) {
-            //     error("Failed to clone the repository mcp-ci/pipeline-library")
-            // }
         }
-        //if (PIPELINE_LIBRARY_REF != '') {
-        //   sh "cd pipeline-library; git tag ${MCP_VERSION} FETCH_HEAD -f ; cd .."
-        //}
-        //if (MK_PIPELINES_REF != '') {
-        //   sh "cd mk-pipelines; git tag ${MCP_VERSION} FETCH_HEAD -f; cd .."
-        //}
-
-        // gerrit.gerritPatchsetCheckout() doesn't support clonning bare repository
-        // sh "git clone --mirror mk-pipelines-git mk-pipelines"
-        // sh "git clone --mirror pipeline-library-git pipeline-library"
     }
 
     stage("Prepare arguments for generation config drive") {
@@ -248,6 +218,11 @@
 
     stage("Download config drive to slave") {
         if (DOWNLOAD_CONFIG_DRIVE == 'true') {
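+            // Drop any stale ISO first so a failed download cannot leave
+            // an outdated config drive image on the slave.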
+            println "Remove previous config drive ISO"
+            sh("""\
+                rm /home/jenkins/images/${iso_name} || true
+            """)
+
             def b_res = build job: 'download-config-drive',
                 parameters: [
                         string(name: 'IMAGE_URL', value: "${BUILD_URL}/artifact/${iso_name}"),
diff --git a/jobs/pipelines/swarm-deploy-platform-without-cicd.groovy b/jobs/pipelines/swarm-deploy-platform-without-cicd.groovy
new file mode 100644
index 0000000..8d3eb22
--- /dev/null
+++ b/jobs/pipelines/swarm-deploy-platform-without-cicd.groovy
@@ -0,0 +1,80 @@
+/**
+ *
+ * Deploy the product cluster using the Jenkins master on the day01 node
+ *
+ * Expected parameters:
+ *
+ *   PARENT_NODE_NAME              Name of the Jenkins slave to create the environment
+ *   PARENT_WORKSPACE              Path to the workspace of the parent job with the tcp-qa repo
+ *   ENV_NAME                      Fuel-devops environment name
+ *   STACK_INSTALL                 Stacks to install using the day01 Jenkins: "openstack,stacklight"
+ *   STACK_INSTALL_TIMEOUT         Stack installation timeout, in seconds
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   SHUTDOWN_ENV_ON_TEARDOWN      Optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
+
+def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
+
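+// Reserve 10 extra minutes on top of the stack install timeout to cover
+// the workspace update, sanity check and snapshot stages.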
+timeout(time: install_timeout + 600, unit: 'SECONDS') {
+
+    node ("${PARENT_NODE_NAME}") {
+        if (! fileExists("${PARENT_WORKSPACE}")) {
+            error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+        }
+        dir("${PARENT_WORKSPACE}") {
+
+            if (! env.STACK_INSTALL) {
+                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+            }
+
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
+            try {
+                // Install the cluster
+                stage("Run Jenkins job on day01 [deploy_openstack:${env.STACK_INSTALL}]") {
+                    shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
+                }
+
+                for (stack in "${env.STACK_INSTALL}".split(",")) {
+                    stage("Sanity check the deployed component [${stack}]") {
+                        shared.sanity_check_component(stack)
+                    }
+                    stage("Make environment snapshot [${stack}_deployed]") {
+                        shared.devops_snapshot(stack)
+                    }
+                } // for
+
+            } catch (e) {
+                common.printMsg("Job is failed", "purple")
+                shared.download_logs("deploy_platform_${ENV_NAME}")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for the installed stacks
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
+                }
+            }
+
+        } // dir
+    } // node
+}
\ No newline at end of file
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 7704d31..0483965 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -152,8 +152,8 @@
 
 def swarm_bootstrap_salt_cluster_devops() {
         def common = new com.mirantis.mk.Common()
-        def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
-        def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: env.MCP_VERSION
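+        // Fall back to the release/<MCP_VERSION> branch when no explicit
+        // commit is provided via the environment.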
+        def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: "release/${env.MCP_VERSION}"
+        def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: "release/${env.MCP_VERSION}"
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
         def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
@@ -227,6 +227,22 @@
         build_pipeline_job('swarm-deploy-platform', parameters)
 }
 
+def swarm_deploy_platform_non_cicd(String stack_to_install, String install_timeout) {
+        // Run the deploy_openstack job on the day01 Jenkins for the specified stacks
+        def common = new com.mirantis.mk.Common()
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def parameters = [
+                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_WORKSPACE', value: pwd()),
+                string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+                string(name: 'STACK_INSTALL', value: stack_to_install),
+                string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+            ]
+        build_pipeline_job('swarm-deploy-platform-without-cicd', parameters)
+}
+
 def swarm_run_pytest(String passed_steps) {
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
@@ -288,8 +304,8 @@
         println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
         println("IPV4_NET_EXTERNAL=" + IPV4_NET_EXTERNAL)
 
-        def cookiecuttertemplate_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
-        def saltmodels_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: env.MCP_VERSION
+        def cookiecuttertemplate_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: "release/${env.MCP_VERSION}"
+        def saltmodels_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: "release/${env.MCP_VERSION}"
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
         def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
@@ -348,7 +364,7 @@
                 string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
                 booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
                 string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
-                string(name: 'COMMON_SCRIPTS_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'COMMON_SCRIPTS_COMMIT', value: "release/${env.MCP_VERSION}"),
                 string(name: 'NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
                 string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index c6f3c5e..66095dc 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -53,6 +53,7 @@
         self.master_minion = self.underlay.get_target_minion_ids(
             self.master_host)[0]
         self.__target_name = None
+        self.__target_minion = None
 
     @property
     def salt_api(self):
@@ -70,6 +71,18 @@
                 target_host)[0]
         return self.__target_name
 
+    @property
+    def target_minion(self):
+        if not self.__target_minion:
+            target_host = self.__salt_api.get_single_pillar(
+                tgt=self.master_minion,
+                pillar="runtest:tempest:test_target")
+            if target_host[-1] == "*":
+                target_host = target_host[:-1]
+            self.__target_minion = self.underlay.get_target_minion_ids(
+                target_host)[0]
+        return self.__target_minion
+
     def fetch_arficats(self, username=None, file_format='xml'):
         with self.underlay.remote(node_name=self.target_name,
                                   username=None) as tgt:
@@ -149,7 +162,7 @@
                                          label="Prepare for Tempest")
 
     def run_tempest(self, timeout=600):
-        tgt = self.target_name
+        tgt = self.target_minion
         image_nameversion = "{}:{}".format(self.image_name, self.image_version)
 
         docker_args = (
@@ -188,39 +201,78 @@
         self.__salt_api.execute_commands(commands=commands,
                                          label="Run Tempest tests")
 
+        def simplify_salt_api_return(api_return, only_first_match=True):
+            """
+                Salt API always returns a dict with one key as 'return'
+                and value as a list with dict. For example:
+            For single node:
+                api.local('cfg01*', 'test.ping', expr_form='compound')
+                {u'return':[{u'cfg01.cookied-cicd-queens-dvr-sl.local':True}]}
+            For multinode:
+                api.local('ctl*', 'test.ping', expr_form='compound')
+                {u'return': [{u'ctl01.cookied-cicd-queens-dvr-sl.local': True,
+                 u'ctl02.cookied-cicd-queens-dvr-sl.local': True,
+                u'ctl03.cookied-cicd-queens-dvr-sl.local': True}]}
+            When wrong function is given:
+                api.local('ctl01*', 'wrong_func', expr_form='compound')
+                {u'return': [{u'ctl01.cookied-cicd-queens-dvr-sl.local':
+                 u"'wrong_func' is not available."}]}
+            Empty return:
+                api.local('wrong_target', 'test.ping', expr_form='compound')
+                {u'return': [{}]}
+            """
+            if api_return.get('return', [{}]) != [{}]:
+                api_return = api_return['return'][0]
+                if only_first_match:
+                    api_return = next(api_return.iteritems())[1]
+                return api_return
+            else:
+                LOG.info("Salt API returned an empty result: [{}]")
+                return False
+
         def wait_status(s):
-            inspect_res = self.salt_api.local(tgt,
-                                              'dockerng.inspect',
+            inspect_res = self.salt_api.local(tgt, 'dockerng.inspect',
                                               self.container_name)
-            if 'return' in inspect_res:
-                inspect = inspect_res['return']
-                inspect = inspect[0]
-                inspect = next(inspect.iteritems())[1]
+            inspect = simplify_salt_api_return(inspect_res)
+            if inspect:
                 status = inspect['State']['Status']
-
                 return status.lower() == s.lower()
-
             return False
 
-        helpers.wait(lambda: wait_status('exited'),
-                     timeout=timeout,
-                     timeout_msg=('Tempest run didnt finished '
-                                  'in {}'.format(timeout)))
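+        # Wait for 'exited' only if the container actually reached the
+        # 'running' state; otherwise report its current state below.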
+        if wait_status('running'):
+            helpers.wait(lambda: wait_status('exited'),
+                         timeout=timeout,
+                         timeout_msg=("Tempest run didn't finish "
+                                      "in {}".format(timeout)))
 
-        inspect_res = self.salt_api.local(tgt,
-                                          'dockerng.inspect',
-                                          self.container_name)
-        inspect = inspect_res['return'][0]
-        inspect = next(inspect.iteritems())[1]
-        logs_res = self.salt_api.local(tgt,
-                                       'dockerng.logs',
-                                       self.container_name)
-        logs = logs_res['return'][0]
-        logs = next(logs.iteritems())[1]
+            inspect_res = self.salt_api.local(tgt, 'dockerng.inspect',
+                                              self.container_name)
+            inspect = simplify_salt_api_return(inspect_res)
 
-        res = self.salt_api.local(tgt, 'dockerng.rm', self.container_name)
-        LOG.info("Tempest container was removed".format(
-            json.dumps(res, indent=4)))
+            logs_res = self.salt_api.local(tgt, 'dockerng.logs',
+                                           self.container_name)
+            logs = simplify_salt_api_return(logs_res)
+            rm_res = self.salt_api.local(tgt, 'dockerng.rm',
+                                         self.container_name)
+            LOG.info("Tempest container was removed: {}".format(
+                json.dumps(rm_res, indent=4)))
+        else:
+            inspect_res = self.salt_api.local(tgt, 'dockerng.inspect',
+                                              self.container_name)
+            inspect = simplify_salt_api_return(inspect_res)
+            if inspect:
+                status = inspect['State']['Status']
+                LOG.info("Container is not in RUNNING state. "
+                         "Current container status is {}".format(status))
+                logs_res = self.salt_api.local(tgt,
+                                               'dockerng.logs',
+                                               self.container_name)
+                logs = simplify_salt_api_return(logs_res)
+            else:
+                LOG.info("dockerng returns unexpected"
+                         " result: {}".format(inspect_res))
+                logs = None
+                inspect = None
 
         return {'inspect': inspect,
                 'logs': logs}
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 166b492..347e8e6 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -85,6 +85,10 @@
 TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
 SALT_VERSION = os.environ.get('SALT_VERSION', '2017.7')
 
+# REPOSITORY_SUITE is always defined in swarm-run-pytest from MCP_VERSION
+REPOSITORY_SUITE = os.environ.get('REPOSITORY_SUITE', 'proposed')
+MCP_VERSION = os.environ.get('MCP_VERSION', REPOSITORY_SUITE)
+
 SL_TEST_REPO = os.environ.get('SL_TEST_REPO',
                               'https://github.com/Mirantis/stacklight-pytest')
 SL_TEST_COMMIT = os.environ.get('SL_TEST_COMMIT', 'master')
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index d00a165..9fefeff 100644
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -91,14 +91,23 @@
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.11.241
-  infra_kvm01_deploy_address: 172.16.164.11
+  infra_kvm01_deploy_address: 172.16.164.3
   infra_kvm01_hostname: kvm01
   infra_kvm02_control_address: 10.167.11.242
-  infra_kvm02_deploy_address: 172.16.164.12
+  infra_kvm02_deploy_address: 172.16.164.4
   infra_kvm02_hostname: kvm02
   infra_kvm03_control_address: 10.167.11.243
-  infra_kvm03_deploy_address: 172.16.164.13
+  infra_kvm03_deploy_address: 172.16.164.5
   infra_kvm03_hostname: kvm03
+  infra_kvm04_control_address: 10.167.11.244
+  infra_kvm04_deploy_address: 172.16.164.6
+  infra_kvm04_hostname: kvm04
+  infra_kvm05_control_address: 10.167.11.245
+  infra_kvm05_deploy_address: 172.16.164.7
+  infra_kvm05_hostname: kvm05
+  infra_kvm06_control_address: 10.167.11.246
+  infra_kvm06_deploy_address: 172.16.164.8
+  infra_kvm06_hostname: kvm06
   infra_kvm_vip_address: 10.167.11.240
   infra_primary_first_nic: eth1
   infra_primary_second_nic: eth2
@@ -108,290 +117,575 @@
   maas_deploy_address: 172.16.164.2
   maas_deploy_cidr: 172.16.164.0/26
   maas_deploy_gateway: 172.16.164.1
-  maas_deploy_range_end: 172.16.164.55
-  maas_deploy_range_start: 172.16.164.15
+  maas_deploy_range_end: 172.16.164.62
+  maas_deploy_range_start: 172.16.164.18
   maas_deploy_vlan: '0'
   maas_dhcp_enabled: 'True'
   maas_fabric_name: fabric-0
   maas_hostname: cfg01
   maas_manage_deploy_network: 'True'
   maas_machines: |
-        kvm01: # #cz7050-kvm.host-telecom.com
+        kvm01: # #cz7625-kvm.host-telecom.com
           distro_series: "xenial"
           # hwe_kernel: "hwe-16.04"
           # pxe_interface_mac:
-          pxe_interface_mac: "00:25:90:e3:37:2c"
+          pxe_interface_mac: "0c:c4:7a:33:24:be"
           interfaces:
-            enp3s0f0:
-              mac: "00:25:90:e3:37:2c"
-              mode: "static"
-              ip: "172.16.164.11"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "176.74.222.96"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-          disk_layout:
-            type: custom
-            bootable_device: sda
-            disk:
-              vgroot:
-                devices:
-                  - sda
-                type: lvm
-                volume:
-                  root:
-                    mount: /
-                    size: 80G
-                    type: ext4
-              vgvar:
-                devices:
-                  - sdb
-                type: lvm
-                volume:
-                  var:
-                    mount: /var/
-                    size: 1T
-                    type: ext4
-        kvm02: # #cz7049-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "00:25:90:e3:3b:86"
-          interfaces:
-            enp3s0f0:
-              mac: "00:25:90:e3:3b:86"
-              mode: "static"
-              ip: "172.16.164.12"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "176.74.222.94"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-          disk_layout:
-            type: custom
-            bootable_device: sda
-            disk:
-              vgroot:
-                devices:
-                  - sda
-                type: lvm
-                volume:
-                  root:
-                    mount: /
-                    size: 80G
-                    type: ext4
-              vgvar:
-                devices:
-                  - sdb
-                type: lvm
-                volume:
-                  var:
-                    mount: /var/
-                    size: 1T
-                    type: ext4
-        kvm03: # #cz7048-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "00:25:90:e3:37:34"
-          interfaces:
-            enp3s0f0:
-              mac: "00:25:90:e3:37:34"
-              mode: "static"
-              ip: "172.16.164.13"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "176.74.222.92"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-          disk_layout:
-            type: custom
-            bootable_device: sda
-            disk:
-              vgroot:
-                devices:
-                  - sda
-                type: lvm
-                volume:
-                  root:
-                    mount: /
-                    size: 80G
-                    type: ext4
-              vgvar:
-                devices:
-                  - sdb
-                type: lvm
-                volume:
-                  var:
-                    mount: /var/
-                    size: 1T
-                    type: ext4
-        gtw01: # #cz7052-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "00:25:90:e3:37:12"
-          interfaces:
-            enp3s0f0:
-              mac: "00:25:90:e3:37:12"
-              mode: "static"
-              ip: "172.16.164.5"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "176.74.222.100"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        gtw02: # #cz7051-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "00:25:90:e3:3b:a4"
-          interfaces:
-            enp3s0f0:
-              mac: "00:25:90:e3:3b:a4"
-              mode: "static"
-              ip: "172.16.164.6"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "176.74.222.98"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        gtw03: # #cz7636-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:55:6a:28"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:55:6a:28"
-              mode: "static"
-              ip: "172.16.164.7"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.241"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd001: # #cz7922-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:9b:16"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:9b:16"
-              mode: "static"
-              ip: "172.16.164.8"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.32"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd002: # #cz7915-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6d:3e:06"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6d:3e:06"
-              mode: "static"
-              ip: "172.16.164.9"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.25"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd003: # #cz7921-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:9b:94"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:9b:94"
-              mode: "static"
-              ip: "172.16.164.10"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.31"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp001: # #cz7913-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:9a:1a"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:9a:1a"
+            one1:
+              mac: "0c:c4:7a:33:24:be"
               mode: "static"
               ip: "172.16.164.3"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:33:24:bf"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:01:3e"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:01:3f"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:f3:ce"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:f3:cf"
+              name: sten2
           power_parameters:
-            power_address: "5.43.227.23"
+            power_address: "185.8.59.227"
             power_pass: ==IPMI_PASS==
             power_type: ipmi
             power_user: ==IPMI_USER==
-        cmp002: # #cz7916-kvm.host-telecom.com
+        kvm02: # #cz7627-kvm.host-telecom.com
           distro_series: "xenial"
           # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:89:4a"
+          pxe_interface_mac: "0c:c4:7a:33:2d:6a"
           interfaces:
-            enp9s0f9:
-              mac: "0c:c4:7a:6c:89:4a"
+            one1:
+              mac: "0c:c4:7a:33:2d:6a"
               mode: "static"
               ip: "172.16.164.4"
               subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
               gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:33:2d:6b"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:43:b8"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:43:b9"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:96:02"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:96:03"
+              name: sten2
           power_parameters:
-            power_address: "5.43.227.26"
+            power_address: "185.8.59.229"
             power_pass: ==IPMI_PASS==
             power_type: ipmi
             power_user: ==IPMI_USER==
-#        cmp001: # #cz7115-kvm.host-telecom.com
-#          distro_series: "xenial"
-#          # hwe_kernel: "hwe-16.04"
-#          pxe_interface_mac: "00:25:90:e4:19:58"
-#          interfaces:
-#            enp9s0f0:
-#              mac: "00:25:90:e4:19:58"
-#              mode: "static"
-#              ip: "172.16.164.3"
-#              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-#              gateway: ${_param:deploy_network_gateway}
-#          power_parameters:
-#            power_address: "185.8.58.66"
-#            power_pass: ==IPMI_PASS==
-#            power_type: ipmi
-#            power_user: ==IPMI_USER==
-#        cmp002: # #cz7116-kvm.host-telecom.com
-#          distro_series: "xenial"
-#          # hwe_kernel: "hwe-16.04"
-#          pxe_interface_mac: "00:25:90:e4:28:6a"
-#          interfaces:
-#            enp9s0f9:
-#              mac: "00:25:90:e4:28:6a"
-#              mode: "static"
-#              ip: "172.16.164.4"
-#              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-#              gateway: ${_param:deploy_network_gateway}
-#          power_parameters:
-#            power_address: "185.8.58.67"
-#            power_pass: ==IPMI_PASS==
-#            power_type: ipmi
-#            power_user: ==IPMI_USER==
-
+        kvm03: # #cz7756-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:69:a0:4c"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:69:a0:4c"
+              mode: "static"
+              ip: "172.16.164.5"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:69:a0:4d"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c0:c2:14"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c0:c2:15"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:09:c2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:09:c3"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.88"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm04: # #cz7792-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          # pxe_interface_mac:
+          pxe_interface_mac: "0c:c4:7a:6c:83:5c"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:83:5c"
+              mode: "static"
+              ip: "172.16.164.6"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:83:5d"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:7d:98"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:7d:99"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:de"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:df"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.112"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm05: # #cz7876-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:6c:88:d6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:88:d6"
+              mode: "static"
+              ip: "172.16.164.7"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:88:d7"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:74"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:03:75"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:89:be"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:89:bf"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.208"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        kvm06: # #cz8073-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:df:ac"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:df:ac"
+              mode: "static"
+              ip: "172.16.164.8"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:df:ad"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:3a:f2"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:3a:f3"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:a6:4c"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:a6:4d"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.227.118"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw01: # #cz9039-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:84"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:84"
+              mode: "static"
+              ip: "172.16.164.9"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:85"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:41:b0"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:58:41:b1"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:90:d2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:90:d3"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.229.28"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw02: # #cz9048-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:82"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:82"
+              mode: "static"
+              ip: "172.16.164.10"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:83"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:00:7c"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:00:7d"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:bc:88:8a"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:bc:88:8b"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.23"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        gtw03: # #cz8159-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:6c:bc:f6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:6c:bc:f6"
+              mode: "static"
+              ip: "172.16.164.11"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:6c:bc:f7"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:9b:cc:32"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:9b:cc:33"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c1:a5:04"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:c1:a5:05"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.9"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd001: # #cz9040-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:c9:02"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:c9:02"
+              mode: "static"
+              ip: "172.16.164.12"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:c9:03"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:aa:90"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:aa:91"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:0a:a4"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:0a:a5"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.246"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd002: # #cz9041-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d5:60"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d5:60"
+              mode: "static"
+              ip: "172.16.164.13"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d5:61"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:04:2c"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:04:2d"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:01:f2"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:01:f3"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.243"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        osd003: # #cz9042-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:c9:3a"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:c9:3a"
+              mode: "static"
+              ip: "172.16.164.14"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:c9:3b"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:33:d7:10"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:33:d7:11"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:0b:5f:50"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:0b:5f:51"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.244"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp001: # #cz9039-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:d6:aa"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:d6:aa"
+              mode: "static"
+              ip: "172.16.164.15"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:d6:ab"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:86:76"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:86:77"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:39:3c"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1e:39:3d"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.58.248"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp002: # #cz9046-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:ce:30"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:ce:30"
+              mode: "static"
+              ip: "172.16.164.16"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:ce:31"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "00:25:90:e0:7d:e0"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "00:25:90:e0:7d:e1"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:0c:0e"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:0c:0f"
+              name: sten2
+          power_parameters:
+            power_address: "185.8.59.222"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
+        cmp003: # #cz8061-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:aa:e0:ce"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:aa:e0:ce"
+              mode: "static"
+              ip: "172.16.164.17"
+              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+              name: one1
+            one2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:aa:e0:cf"
+              name: one2
+            ten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:94:5e"
+              name: ten1
+            ten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1d:94:5f"
+              name: ten2
+            sten1:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:87:e4"
+              name: sten1
+            sten2:
+              mode: UNCONFIGURED
+              mac: "0c:c4:7a:1f:87:e5"
+              name: sten2
+          power_parameters:
+            power_address: "5.43.225.228"
+            power_pass: ==IPMI_PASS==
+            power_type: ipmi
+            power_user: ==IPMI_USER==
   mcp_version: proposed
   mcp_common_scripts_branch: ''
   offline_deployment: 'False'
@@ -401,13 +695,13 @@
   openldap_organisation: ${_param:cluster_name}
   openstack_benchmark_node01_address: 10.167.11.95
   openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_count: '2'
+  openstack_cluster_size: golden
+  openstack_compute_count: '3'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_single_address_ranges: 10.167.11.3-10.167.11.4
-  openstack_compute_deploy_address_ranges: 172.16.164.3-172.16.164.4
-  openstack_compute_tenant_address_ranges: 10.167.12.3-10.167.12.4
-  openstack_compute_backend_address_ranges: 10.167.12.3-10.167.12.4
+  openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
+  openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
+  openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
+  openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
   openstack_control_address: 10.167.11.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.11.11
@@ -425,18 +719,18 @@
   openstack_database_node03_address: 10.167.11.53
   openstack_database_node03_hostname: dbs03
   openstack_enabled: 'True'
-  openstack_gateway_node01_deploy_address: 172.16.164.5
-  openstack_gateway_node02_deploy_address: 172.16.164.6
-  openstack_gateway_node03_deploy_address: 172.16.164.7
+  openstack_gateway_node01_deploy_address: 172.16.164.9
+  openstack_gateway_node02_deploy_address: 172.16.164.10
+  openstack_gateway_node03_deploy_address: 172.16.164.11
   openstack_gateway_node01_address: 10.167.11.224
   openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node01_tenant_address: 10.167.12.5
-  openstack_gateway_node02_address: 10.167.11.225
   openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node02_tenant_address: 10.167.12.6
-  openstack_gateway_node03_address: 10.167.11.226
   openstack_gateway_node03_hostname: gtw03
-  openstack_gateway_node03_tenant_address: 10.167.12.7
+  openstack_gateway_node01_tenant_address: 10.167.12.9
+  openstack_gateway_node02_address: 10.167.11.225
+  openstack_gateway_node02_tenant_address: 10.167.12.10
+  openstack_gateway_node03_address: 10.167.11.226
+  openstack_gateway_node03_tenant_address: 10.167.12.11
   openstack_message_queue_address: 10.167.11.40
   openstack_message_queue_hostname: msg
   openstack_message_queue_node01_address: 10.167.11.41
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
index aa0d838..001131c 100644
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
@@ -18,10 +18,12 @@
       - infra_kvm
       - linux_system_codename_xenial
       interfaces:
-        enp3s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp3s0f1:
-          role: bond0_ab_ovs_vlan_ctl
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
 
     kvm02.cookied-cicd-ovs-maas.local:
       reclass_storage_name: infra_kvm_node02
@@ -29,10 +31,12 @@
       - infra_kvm
       - linux_system_codename_xenial
       interfaces:
-        enp3s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp3s0f1:
-          role: bond0_ab_ovs_vlan_ctl
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
 
     kvm03.cookied-cicd-ovs-maas.local:
       reclass_storage_name: infra_kvm_node03
@@ -40,10 +44,54 @@
       - infra_kvm
       - linux_system_codename_xenial
       interfaces:
-        enp3s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp3s0f1:
-          role: bond0_ab_ovs_vlan_ctl
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+
+    kvm04.cookied-cicd-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node04
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+
+    kvm05.cookied-cicd-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node05
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
+
+    kvm06.cookied-cicd-ovs-maas.local:
+      reclass_storage_name: infra_kvm_node06
+      roles:
+      - infra_kvm_wo_gluster
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
 
     osd<<count>>:
       reclass_storage_name: ceph_osd_rack01
@@ -51,10 +99,12 @@
       - ceph_osd
       - linux_system_codename_xenial
       interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        enp9s0f1:
-          role: single_vlan_ctl
+        one1:
+          role: single_mgm_dhcp
+        ten2:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_ctl_lacp
 #          role: bond0_ab_vlan_ceph_storage_backend
 
     cmp<<count>>:
@@ -63,10 +113,16 @@
       - openstack_compute
       - linux_system_codename_xenial
       interfaces:
-        enp9s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp9s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        ten1:
+          role: bond_ctl_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_prv_lacp
 
     gtw01.cookied-cicd-ovs-maas.local:
       reclass_storage_name: openstack_gateway_node01
@@ -74,12 +130,16 @@
       - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
-        enp3s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp3s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_external
-          external_address: 172.17.42.5
-          external_network_netmask: 255.255.255.192
+        ten1:
+          role: bond_ctl_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_prv_lacp
 
     gtw02.cookied-cicd-ovs-maas.local:
       reclass_storage_name: openstack_gateway_node02
@@ -87,12 +147,16 @@
       - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
-        enp3s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp3s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_external
-          external_address: 172.17.42.6
-          external_network_netmask: 255.255.255.192
+        ten1:
+          role: bond_ctl_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_prv_lacp
 
     gtw03.cookied-cicd-ovs-maas.local:
       reclass_storage_name: openstack_gateway_node03
@@ -100,9 +164,13 @@
       - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
-        enp2s0f0:
+        one1:
           role: single_mgm_dhcp
-        enp2s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_external
-          external_address: 172.17.42.7
-          external_network_netmask: 255.255.255.192
+        ten1:
+          role: bond_ctl_lacp
+        ten2:
+          role: bond_prv_lacp
+        sten1:
+          role: bond_ctl_lacp
+        sten2:
+          role: bond_prv_lacp
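Note: this environment rework replaces per-host kernel NIC names (enp3s0f0, enp9s0f1, ...) with the logical interface names assigned in the MAAS machine definitions (one1, ten1/ten2, sten1/sten2) and splits the old single bond0_* roles into per-member LACP roles. A hedged review helper, assuming the file keeps the usual top-level 'nodes:' mapping:

    import yaml

    # Hypothetical check: every renamed physical node keeps exactly one
    # DHCP management interface and at least two LACP bond members.
    env = yaml.safe_load(open('salt-context-environment.yaml'))
    for name, node in env['nodes'].items():
        roles = [i['role'] for i in node.get('interfaces', {}).values()]
        if 'single_mgm_dhcp' not in roles:
            continue  # not one of the reworked physical nodes
        assert roles.count('single_mgm_dhcp') == 1, name
        assert sum(r.startswith('bond_') for r in roles) >= 2, name
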
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
index 9bc4c1f..ec758d3 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
@@ -848,7 +848,7 @@
           - name: {{ HOSTNAME_CID01 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('CID_NODE_CPU', 1) }}
+              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
               memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
               boot:
                 - hd
@@ -874,7 +874,7 @@
           - name: {{ HOSTNAME_CID02 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('CID_NODE_CPU', 1) }}
+              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
               memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
               boot:
                 - hd
@@ -900,7 +900,7 @@
           - name: {{ HOSTNAME_CID03 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('CID_NODE_CPU', 1) }}
+              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
               memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
               boot:
                 - hd
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
index bae6a02..2d7c21d 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
@@ -902,7 +902,7 @@
           - name: {{ HOSTNAME_CID01 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('CID_NODE_CPU', 1) }}
+              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
               memory: {{ os_env('CID_NODE_MEMORY', 16384) }}
               boot:
                 - hd
@@ -928,7 +928,7 @@
           - name: {{ HOSTNAME_CID02 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('CID_NODE_CPU', 1) }}
+              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
               memory: {{ os_env('CID_NODE_MEMORY', 16384) }}
               boot:
                 - hd
@@ -954,7 +954,7 @@
           - name: {{ HOSTNAME_CID03 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('CID_NODE_CPU', 1) }}
+              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
               memory: {{ os_env('CID_NODE_MEMORY', 16384) }}
               boot:
                 - hd
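Note: both underlay changes above only raise the fallback vCPU count for the CICD control nodes from 1 to 2; an exported CID_NODE_CPU still overrides it. A sketch of the assumed os_env() semantics (an environment lookup with a default, which is how the Jinja helper appears to behave):

    import os

    def os_env(name, default=None):
        # Assumed semantics of the os_env() template helper:
        # the environment variable, if set, wins over the inline default.
        return os.environ.get(name, default)

    print(os_env('CID_NODE_CPU', 2))  # -> 2 unless CID_NODE_CPU is exported
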
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
index 7ce4d69..3936eb5 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
@@ -57,18 +57,9 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Temporary workaround !! Fix or debug
+- description: "WR for PROD-27976"
   cmd: |
-    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+    sed -i 's/physical\_control\_cluster/physical\_control\_small/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Temporary workaround for removing virtual gtw nodes
-  cmd: |
-    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
\ No newline at end of file
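Note: the remaining step swaps the KVM control-plane sizing class; the backslashes in the sed pattern are redundant (underscores need no escaping) but harmless. The equivalent substitution, sketched in Python for clarity (the path argument stands in for the Jinja-rendered init.yml path):

    def apply_prod_27976_workaround(path):
        # Same effect as the sed call above: replace the control-plane
        # sizing class with its "small" variant, everywhere in the file.
        text = open(path).read()
        open(path, 'w').write(
            text.replace('physical_control_cluster',
                         'physical_control_small'))
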
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml
new file mode 100644
index 0000000..c32d229
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml
@@ -0,0 +1,66 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-small-mcp-ironic' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-small-mcp-ironic.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-small-mcp-ironic.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+#- description: "Workaround for using glusterfs on single node"
+#  cmd: |
+#    set -e;
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/replica: .*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node02_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node03_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/opts: .*/opts: \"defaults,backup-volfile-servers=${_param:glusterfs_node01_address}\"/g' {} +
+#
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+#- description: "Workaround for changing services to single mode"
+#  cmd: |
+#    set -e;
+#    sed -i 's/- system.keystone.server.cluster/- system.keystone.server.single/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+#    sed -i 's/- system.rabbitmq.server.cluster/- system.rabbitmq.server.single/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/message_queue.yml;
+#
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
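Note: the combined-roles workaround removes storage-node definitions in two ways: sed drops the class includes, while reclass-tools del-key drops already-generated node entries. A hedged sketch of what del-key presumably does (an assumption about reclass-tools, not its actual implementation):

    import yaml

    def del_key(dotted_path, path):
        """Assumed effect of 'reclass-tools del-key': drop a nested key."""
        data = yaml.safe_load(open(path))
        node = data
        keys = dotted_path.split('.')
        for k in keys[:-1]:
            node = node[k]
        node.pop(keys[-1], None)
        yaml.safe_dump(data, open(path, 'w'), default_flow_style=False)
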
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml
new file mode 100644
index 0000000..1ca5bd3
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml
@@ -0,0 +1,324 @@
+default_context:
+  alertmanager_notification_email_enabled: 'False'
+  auditd_enabled: 'False'
+  backend_network_netmask: ''
+  backup_private_key: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIEpAIBAAKCAQEA0YjCIcsHq6Jmai5de19wOERdbMMP1kvaHAK2j5tCiywJrmsN
+
+    Y+nzGzFKqUiuW/HlvHCY6UNdlDyz6H7WdhzmRoFgVIzWGWR1rJk70D2JdN+QrKhA
+
+    BqAhJ/zIOn0btx3y30VaIHAR6V9P6sezYLtaQAP9LLuHV+9tJH0dY3sisYDd9PdS
+
+    Hn2o0CYpe2Ojt1vaKhhIbtzxlaXM4XelxVbaahlIyzW6ooXYL5zwccG+g11T0R9z
+
+    6YNym+pI1arFjOTobeEiCkDUX+oFd/ICm3ntt6LP69gZKcGSOFB/8n17nBZfmqQf
+
+    puSwZKqwp6pca4VaT2uJx0jW9mBbMgyhoyKuPwIDAQABAoIBAQCAxfCxPgrfw7X3
+
+    ablP+i6IlhNopVTVWdaYwl4aUcwKpizpAafAEOa0fo2pDh8CKN8M+ZIwQZOAZ/IV
+
+    X+ZDvq0TBa4DKG7oOiJLyfzFlRwmNMPAKML4j27xGVyg/YSL/J7n8sJaDyYi6828
+
+    t7CZMWtczlbJKBMcyuujsjTej62ZskAz9S9LC3A8ppLYe2/8WUZueLehXVLfy3rO
+
+    c/7LU+zQ9kcP/nEuFgMYzcLxI8bJligI5JdtXL5baK3DX/9UsWzDouHePCCYo07k
+
+    xInodc9WCHKeAriV6qCctOm6TIhB30hDNQl+rnF2c+Ead5hyP1UneUW+8D8RSxe7
+
+    CT27o3IpAoGBAP8e4N+dbUxAAlRx+20Dgad7/g/zBb+HJyUIsmpxF5RebJNLp03o
+
+    8bOS1we/lS1HIQFolveR/pcoWowJUpDkPJLXC8Wnfjs5NvKRsqV5OLp+m9DynQ1y
+
+    xZmTfHJV4aluZvUd5Azw0lOdAgNu97fLS4IYS6hRtuEncSwWbDHIN9GlAoGBANJB
+
+    p2Z4h49XJ0YigUw7S/VyuU9vSA6nLQwehBMyAl6hmvnCg3DB8sNuOQYZcqr+aUyc
+
+    gicyiEwvwW8Qbm837eqv/8CJSkfQq/8JVg6F4vRweNI5eYv2N/ZInmSn1opYzqEd
+
+    J4TFalwwgUqbLer+dhCjfcn6mdkRyrnW1GepvXMTAoGBAJcJIdpg6gcdUgPKYy5b
+
+    yBNuna+1kW6dRfhv2KiZgXsuF5twS4EdapKEWVdV/awLkyexUscIoK++jTSghEgR
+
+    RycrtuVyTpIjQjuDND8wr/wA3qBqMb53dzJ/lUpfO7TCDqQI6S0cGXi02C9OL8uY
+
+    yIAhgBELJ3jOj/qo367tONadAoGAKz8l14XjHDCo+1wNjEiGDy5Rv2z8PUdVlLCS
+
+    KgCXTC+hWM4RixfZfykkwYRqeZFqxz9J5hYWwtTvzJBspqOyZBtfV5LlnG/ncMXS
+
+    1ZnkXLLlpxf7UDaMvDOjoMCE+F/b4HfGsSCKB/xSG65fe35renCmZu0MyAFI2YC+
+
+    n7PiK9ECgYAZ/9KBk0FQmOpc13+oqHyMPOUHPnfFkmVVJgaJQkLWst3x6+Mx0uQW
+
+    KFddR3UNh8V+oOP/WujT85WtueM2E3M4/C+koeVlaDFh8g4qglnpSuT4CTqLpmPb
+
+    KYWKD0IElw7/4ny4VTTMe6KbnDV0A154tFNvsTX6ELvy4V8OFuPfnQ==
+
+    -----END RSA PRIVATE KEY-----'
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRiMIhyweromZqLl17X3A4RF1sww/WS9ocAraPm0KLLAmuaw1j6fMbMUqpSK5b8eW8cJjpQ12UPLPoftZ2HOZGgWBUjNYZZHWsmTvQPYl035CsqEAGoCEn/Mg6fRu3HfLfRVogcBHpX0/qx7Ngu1pAA/0su4dX720kfR1jeyKxgN3091IefajQJil7Y6O3W9oqGEhu3PGVpczhd6XFVtpqGUjLNbqihdgvnPBxwb6DXVPRH3Ppg3Kb6kjVqsWM5Oht4SIKQNRf6gV38gKbee23os/r2BkpwZI4UH/yfXucFl+apB+m5LBkqrCnqlxrhVpPa4nHSNb2YFsyDKGjIq4/
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cfg_failsafe_ssh_public_key: '1'
+  cicd_enabled: 'False'
+  cicd_private_key: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIEpAIBAAKCAQEA0EnXqmZqVb+/ebPURO9wb3A8ntUfQmQOC1WNJv/hU7XathNA
+
+    kDmDSMCn9a7m7WbuANpVhZkmstebNgHFoCtGSL4JJYRrBB5QKN0QrpFKiGaetCE2
+
+    eGDAmvvFx9hgbQUwtuJvAhUg4sSq6WY0yblcID3fLX4YKuDVvS8fisB6i1xrQ/tQ
+
+    qEF8S5rsoXUigYnZZwpnkRgSuRtECY8OFSXH4sv+JPEVhrx5GD7CtwVuYliIg1mw
+
+    nF7J5X9wPNmNnm8g4Nz07zpA2FMoF5+QuPZrV7t3Xm2hcSmMZbsGG4zy2pqbwDvR
+
+    5FgQLfaPBYcqrrIr/kCnCajDzpZdBxIjdXPK9QIDAQABAoIBAQCEiVhIP58OLVND
+
+    t+AsdUfQQVTxpJ+deVlOcQO+ezgAMkmXbiy5GT+mvaCivsaG7eYoeMXOmyN6zaMf
+
+    /ISqZJ72jqX3T4lhN4C+X9zLl/wbS2FVMYUdmEM221qAzfidpp3W4cLNSnCAm7A9
+
+    kCGq8t3iTjyDECeEsUiQdznU6qGPpvqRC9E2wlatbUowYT8VSbtc9aDGkZNMnZAP
+
+    ypBzGQOhIIIs3V3m0XqF5dsqxq+IjZmBjkJ8TBEyRre+Hu25r4ksQk42Qw8Lj1yI
+
+    W/+XTJiI04XLbCubeknQuTy3baku1i58gEVuJcYdeC3pCF4nu1PvBRxgVE1TU1xq
+
+    mIL2rBrJAoGBAPjSOvM/XmBfwW3znJ7xknDoLFq2yoI5bAr0ehr0/VLbplAybY1b
+
+    6mWcpiWcNPnCwAUXUjI8FRklQwMrCNvdXBlixZM5Au1Bsg1JjuYrQScc8dRFDWH5
+
+    8YDFxrR9ijFkYmhTHOMfm2vk5BxaOgIvAwv5XN43Li1nKAjlwU3euPZTAoGBANZM
+
+    PbSHJ3Y8llOWNwFqqYC9VGVAC5K+kKKmJmUKBluZpHyYYwdGqF+ItM8LzLilW/jj
+
+    CZF9KU4lwovbTHDsvOvYPJFO+nVfXYpiqFG95X5w9L1qnfLhWNfbNjp1JgzOadGb
+
+    RIPKktjjJEde9ShNPRfHWRzgxAvj+57moq3v64CXAoGAAlyMbq6VSLjf7xk2zVNh
+
+    nmy1rw65EU9WNSxo1ESq+tRW2cAAxiyvJtO7x/OZlR1CEUpNX2iukpSF9Eu+Q8fw
+
+    DdWgJmuOGY5cEEA4ePrEHYjqyqb1H47tudkmr6PZYeqf/Hl9drJgGUAM4jABCPBF
+
+    SSHOvdUsPQYTnTIBCaopez0CgYEAsj9YVADXYhGjOIOJ3TPLKbpRqKZM+hJoW+G3
+
+    rfNYtnhlyP034aV0B4K+Yjl+X1er2KmAG/Pvl4DxAUO3oXZI7iM+vd7jlR1twN2v
+
+    A87gRPvPln5IQu4N5/3+bUFkFOLcQezUYIru+lh0pKjnYk5cspquFMcgSoOnl9Rf
+
+    HC6jxKECgYBxh0hH+vmudh36zuhqghZSI+DLd6WHpzpnnQeSkI2sHPB7EGTZ8+Ql
+
+    Rykt8XGiZvkuc9geH5Sc6aIKJr7WWTxgwmhj7T6iBHLFLyGfcAvUAGcLMOnjNFcL
+
+    qEaNVOfzXB9ZBN1h8wRbxoKx+o2c78agrQyetEyiz7wkYFQKj8xq4Q==
+
+    -----END RSA PRIVATE KEY-----'
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQSdeqZmpVv795s9RE73BvcDye1R9CZA4LVY0m/+FTtdq2E0CQOYNIwKf1rubtZu4A2lWFmSay15s2AcWgK0ZIvgklhGsEHlAo3RCukUqIZp60ITZ4YMCa+8XH2GBtBTC24m8CFSDixKrpZjTJuVwgPd8tfhgq4NW9Lx+KwHqLXGtD+1CoQXxLmuyhdSKBidlnCmeRGBK5G0QJjw4VJcfiy/4k8RWGvHkYPsK3BW5iWIiDWbCcXsnlf3A82Y2ebyDg3PTvOkDYUygXn5C49mtXu3debaFxKYxluwYbjPLampvAO9HkWBAt9o8Fhyqusiv+QKcJqMPOll0HEiN1c8r1
+  cluster_domain: small_cloud.local
+  cluster_name: small_cloud
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: 88zA67wwzN74hI8Vzpy7CCEDXPGfKGUv37965C5bKeIZM8436V73PhAT9yaLDUYV0xj8zpidxbmh0FMN83dzNWAA
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.60.0.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.70.0.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.70.0.0/24
+  deployment_type: physical
+  designate_backend: powerdns
+  designate_enabled: 'False'
+  openstack_create_public_network: 'True'
+  openstack_public_neutron_subnet_gateway: 172.17.41.129
+  openstack_public_neutron_subnet_cidr: 172.17.41.128/26
+  openstack_public_neutron_subnet_allocation_start: 172.17.41.170
+  openstack_public_neutron_subnet_allocation_end: 172.17.41.190
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: sgudz@mirantis.com
+  gainsight_service_enabled: 'False'
+  galera_ssl_enabled: 'False'
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  gnocchi_aggregation_storage: file
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.60.0.11
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.60.0.12
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.60.0.13
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.60.0.10
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  ironic_enabled: 'True'
+  kubernetes_ctl_on_kvm: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_enabled: 'False'
+  manila_enabled: 'False'
+  manila_lvm_devices: /dev/sdb,/dev/sdc
+  manila_lvm_volume_name: manila-volume
+  manila_share_backend: lvm
+  mcp_common_scripts_branch: ''
+  mcp_version: 2019.2.0
+  motd_company_name: MirantisTestDeployment
+  no_platform: 'False'
+  nova_vnc_tls_enabled: 'False'
+  octavia_private_key: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIEpAIBAAKCAQEAuiHjkWRkhIWdhiRPqdE/rYIbcYsMkDSIZrlu0yzicGBxyNOH
+
+    qwzW48zQr5y/q4HaaPC5HB4LGylnEnyM4vSII+kiT9E8T1sr/XNi9GqYonPVEDUu
+
+    aE8EmZ1RnVppTqpkbGpRnF6GmBtGSdthM3pYt97/UeaviFJye3G3tz47pYwXND4j
+
+    6maElcSoUEntpHkJ5esBy+G3yr2Y9mF9EOV6ZNxN1jIc2ufxFTQruhqxx8ug1EWF
+
+    9JlEByfML6gwHq3FgSz2MHWX+to+LRJALv5KY4UAworAPzafY4/557c6ggqvvakk
+
+    wqju59z5QWqBV8Vu+30VTdbQd8xsnYlPdAUziwIDAQABAoIBAAkF5YtBy6Gk/Irp
+
+    Lbd0vlqB6SSq8fP3p/ka2iWAkoWSF1H99yCyoaQvZJpuxdfhlp3ptX+IcU9ixW1e
+
+    /lFuOz9KaPBnmz3ZlJtXwZ9Jn5aYeOatPU+vLPDpcHBpVpuKpQ2gZc5nwqyS/ehM
+
+    qzycnLplJAlrXm0EWipjy90lNi0DNV57hpaKKH5Ti/UGQnGTAfpFXf/YMu4PVVFj
+
+    GHx9VyTAX7dM1yRHMIF4/3qFAnbig8erCeRAEeUhLjMs6T1KidgBltyr7dz4mYoX
+
+    3MdgwTL4rnBCFFopN8vwcK+7bkj/6D1g2a6RnExa6ZB4QJBf28iBfn+i7qoNvt+1
+
+    iwgo9DECgYEA3IpFCAOMYiHagRrv3jTT0qKgCYBdt1La2Sjj7hCZUYJBp1it5zjr
+
+    VbT2J4xH38fr8OflEwcdpJnOVPKQA3AgRrixF8wG5WxIaDxBruOYE6QoqWSwnNYJ
+
+    egcKIDHbZH0C9y+P45IGbHEUEKJIdtSHo6uJYEQ+JSJdlR/BuTQDCFkCgYEA2A9Z
+
+    oxu7gwc1y01Y0FskU0guLm1XyDQEUf7uyFOMu7Fou/Nb4mKLVH5WmM5MQSBt77ow
+
+    yBOZliT7ucSzQYkZ7xENAJ4QNzLAsQ9nBOgAjpnSCM6KZXdklntdl0yc5vy3CrkK
+
+    QxcRURJdvrCNh+yGON7gh2tN3hmHldz9mIe5noMCgYBZL5WBCyZPXQ9nHISaYNS0
+
+    ns2HaiD8DQaxwUaDtLS78KgqS8Nv+WW6MEdHcQRz6/5vWugZjMwhD44QblNtAIAc
+
+    +X2sePbk+qhBdvS9DA8pCj7jWfPOSQRmE8i1glQNzDxRyCsxRZFRc11A6M/TNllw
+
+    B+OzSYW0MLSVpNUL14vOaQKBgQCx8IJsOKzHjqqZEsxwXH+gLzQJfHftJBnixcg2
+
+    J8kh00xkblpgKbSCE8KR+mUGxgSEiJ0gSjQVWcQPDJZtBNLc9vf0aDkGnL/hksPP
+
+    YJsE1l4Kbr/ALQIkhNlhf/FAsyS0qgxvkJHsaOnX4GPPa9ZnA/d6z77DidcGb4hT
+
+    lIQkrQKBgQCMKuX59yLIOEHgiaBNnM1/nunSyinlTbEU7mJuYKPdScJ8qi/CouN8
+
+    UBeSH9OEKUDqxj1V8BeHKCPcHrLRwTsYONpR+6VQ2n0Y7UUHwz4ZY+ljAUHhD/4B
+
+    d6GOUmhxa7PCcr2g4RwsGyDwvwm7fyQb0cCPW0aLeiLeVsdUBd5gbg==
+
+    -----END RSA PRIVATE KEY-----'
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6IeORZGSEhZ2GJE+p0T+tghtxiwyQNIhmuW7TLOJwYHHI04erDNbjzNCvnL+rgdpo8LkcHgsbKWcSfIzi9Igj6SJP0TxPWyv9c2L0apiic9UQNS5oTwSZnVGdWmlOqmRsalGcXoaYG0ZJ22Ezeli33v9R5q+IUnJ7cbe3PjuljBc0PiPqZoSVxKhQSe2keQnl6wHL4bfKvZj2YX0Q5Xpk3E3WMhza5/EVNCu6GrHHy6DURYX0mUQHJ8wvqDAercWBLPYwdZf62j4tEkAu/kpjhQDCisA/Np9jj/nntzqCCq+9qSTCqO7n3PlBaoFXxW77fRVN1tB3zGydiU90BTOL
+  offline_deployment: 'False'
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_enabled: 'False'
+  openscap_enabled: 'False'
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.60.0.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_control_address_ranges: 10.60.0.101-10.60.0.200
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_tenant_address_ranges: 10.80.0.101-10.80.0.200
+  openstack_control_address: 10.60.0.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.60.0.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.60.0.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.60.0.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.60.0.10
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.60.0.11
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.60.0.12
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.60.0.13
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.60.0.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.80.0.6
+  openstack_gateway_node02_address: 10.60.0.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.80.0.7
+  openstack_gateway_node03_address: 10.60.0.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.80.0.8
+  openstack_internal_protocol: http
+  openstack_memcache_security_enabled: 'False'
+  openstack_message_queue_address: 10.60.0.10
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.60.0.11
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.60.0.12
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.60.0.13
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_octavia_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.60.0.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.60.0.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.60.0.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.60.0.19
+  openstack_version: queens
+  platform: openstack_enabled
+  publication_method: email
+  rabbitmq_ssl_enabled: 'False'
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: 8UWALgLbADCp18pAj8hCIzJE2ZWPXgK7E23n8W44ji0A
+  salt_api_password_hash: $6$FaHYNcaGIBJF$n4hvLCOhR0/IrbPVgWCfddYilsmXF8T1hj38VJ2auL5Y8DdY2TG2/wc6KNCivYe8uQ68L0keoDfcu1eio.WbS1
+  salt_master_address: 10.60.0.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.70.0.15
+  secrets_encryption_enabled: 'False'
+  secrets_encryption_private_key: '1'
+  sf_notifications_enabled: 'False'
+  shared_reclass_branch: ''
+  shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+  sriov_network_subnet: 10.55.0.0/16
+  stacklight_enabled: 'False'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.80.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.80.0.0/24
+  tenant_telemetry_enabled: 'False'
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  vnf_onboarding_enabled: 'False'
+  xtrabackup_client_throttle: '0'
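Note: the context wires three flat /24 networks (control 10.60.0.0/24, deploy 10.70.0.0/24, tenant 10.80.0.0/24), and every control-plane *_address parameter is expected to land inside the control subnet. A hypothetical consistency check over the rendered context:

    import ipaddress
    import yaml

    ctx = yaml.safe_load(open(
        'cookiecutter-context-small-mcp-ironic.yaml'))['default_context']
    control = ipaddress.ip_network(ctx['control_network_subnet'])
    for key, value in ctx.items():
        # Heuristic: control-plane parameters end in '_address';
        # tenant addresses live in 10.80.0.0/24 and are skipped.
        if (key.startswith('openstack_') and key.endswith('_address')
                and 'tenant' not in key):
            assert ipaddress.ip_address(value) in control, key
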
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml
new file mode 100644
index 0000000..fb103d7
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml
@@ -0,0 +1,105 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      - features_runtest
+      classes:
+      - environment.cookied-small-mcp-ironic.override_ntp_virtual
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy  # another VIP interface is used
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_external
+          external_address: 172.17.16.121
+          external_network_netmask: 255.255.255.0
+
+    gtw01:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - linux_system_codename_xenial
+      classes:
+      - system.linux.system.repo.mcp.apt_mirantis.docker
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+        ens7:
+          role: single_ovs_br_baremetal
+
+    bmt01:
+      reclass_storage_name: openstack_baremetal_node01
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens7:
+          role: single_baremetal
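Note: the environment context maps each node to a reclass storage name, a set of roles, and per-interface roles; gtw01 carries the extra single_ovs_br_baremetal leg and bmt01 the single_baremetal one that Ironic needs. A small hypothetical helper to flatten the inventory for review:

    import yaml

    env = yaml.safe_load(open('environment-context.yaml'))
    for name, node in env['nodes'].items():
        ifaces = ', '.join(
            '{0}={1}'.format(label, spec['role'])
            for label, spec in sorted(node.get('interfaces', {}).items()))
        print('{0}: roles={1} [{2}]'.format(name, node['roles'], ifaces))
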
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml
new file mode 100644
index 0000000..b82e26c
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml
@@ -0,0 +1,26 @@
+{% from 'cookied-small-mcp-ironic/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-small-mcp-ironic/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-small-mcp-ironic/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+#- description: "Workaround for using glusterfs on single node"
+#  cmd: |
+#    set -e;
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/replica: .*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node02_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node03_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/opts: .*/opts: \"defaults,backup-volfile-servers=${_param:glusterfs_node01_address}\"/g' {} +
+#
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..81936a4
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml
@@ -0,0 +1,84 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - if lvs vg0; then pvresize /dev/vda3; fi
+   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /usr/share/growlvm/image-layout.yml
+     content: |
+       root:
+         size: '30%VG'
+       home:
+         size: '1G'
+       var_log:
+         size: '11%VG'
+       var_log_audit:
+         size: '5G'
+       var_tmp:
+         size: '11%VG'
+       tmp:
+         size: '5G'
+     owner: root:root
+
+  growpart:
+    mode: auto
+    devices:
+      - '/'
+      - '/dev/vda3'
+    ignore_growroot_disabled: false
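Note: this user-data file is itself a Jinja template; the ssh key loop is rendered before the blob reaches cloud-init, while the single-brace {hostname}/{gateway} placeholders are left for a later formatting pass. A sketch of the rendering step, assuming config.underlay.ssh_keys is a list of dicts with a 'public' field (which is what the loop implies):

    import jinja2

    template = jinja2.Template(
        open('underlay--user-data1604-swp.yaml').read())
    # Jinja resolves config.underlay.ssh_keys against plain nested dicts.
    rendered = template.render(
        config={'underlay': {'ssh_keys': [{'public': 'AAAAB3...'}]}})
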
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml
new file mode 100644
index 0000000..0311867
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml
@@ -0,0 +1,397 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-small-mcp-ironic/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-small-mcp-ironic') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_BMT01 = os_env('HOSTNAME_BMT01', 'bmt01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-small-mcp-ironic_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_BMT01 }}: +76
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_BMT01 }}: +76
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+180, +220]
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          baremetal:
+            parent_iface:
+              phys_dev: {{ os_env('IFACE_IRONIC', 'veth-contdpdk1') }}
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for the 'backing_store' option of node volumes.
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+                                                            # it will be uploaded after config drive generation
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &all_gtw_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+                - label: ens7
+                  l2_network_device: baremetal
+                  interface_model: *interface_model
+              network_config: &all_gtw_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+                ens7:
+                  networks:
+                    - baremetal
+
+          - name: {{ HOSTNAME_BMT01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_gtw_interfaces
+              network_config: *all_gtw_network_config
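Note: the devops address pools use offsets relative to whatever subnet gets allocated from the pool: '+N' counts from the network address, and a negative bound like the '-10' in 'dhcp: [+90, -10]' counts back from the broadcast address. That is my reading of the fuel-devops convention, sketched below:

    import ipaddress

    net = ipaddress.ip_network('10.60.0.0/24')  # e.g. a private-pool01 slice

    def at(offset):
        # '+N' from the network address, '-N' back from the broadcast.
        base = net.network_address if offset >= 0 else net.broadcast_address
        return base + offset

    print(at(15))           # cfg01 -> 10.60.0.15
    print(at(90), at(-10))  # dhcp window -> 10.60.0.90 .. 10.60.0.245
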
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
index cb6a5bb..acad827 100644
--- a/tcp_tests/tests/system/test_cvp_pipelines.py
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -121,10 +121,16 @@
         jenkins_start_timeout = 60
         jenkins_build_timeout = 1800
 
+        maas_minion_id = salt.get_single_pillar(
+            tgt='I@maas:cluster:enabled:True or I@maas:region:enabled:True',
+            pillar="__reclass__:nodename")
+
         job_name = 'cvp-sanity'
         job_parameters = {
             'TEST_SET': '/var/lib/cvp-sanity/cvp_checks/tests/',
-            'TESTS_SETTINGS': 'drivetrain_version=proposed',
+            'TESTS_SETTINGS': (
+                'drivetrain_version={0};ntp_skipped_nodes={1}'
+                .format(settings.MCP_VERSION, maas_minion_id)),
         }
 
         show_step(2)
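Note: the cvp-sanity step now resolves the MAAS minion through a compound Salt match before building job parameters, so whichever node carries the maas pillar is excluded from the NTP checks. Assembling the resulting TESTS_SETTINGS value, with example inputs:

    # Illustration only; the minion id comes from get_single_pillar() above.
    mcp_version = 'proposed'
    maas_minion_id = 'cfg01.cookied-cicd-ovs-maas.local'
    tests_settings = 'drivetrain_version={0};ntp_skipped_nodes={1}'.format(
        mcp_version, maas_minion_id)
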
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 4d73e84..62b1c9f 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -263,8 +263,14 @@
         salt_deployed.update_ssh_data_from_minions()
 
         show_step(4)
+        maas_minion_id = salt_deployed.get_single_pillar(
+            tgt='I@maas:cluster or I@maas:region',
+            pillar="__reclass__:nodename")
+
         params = jenkins.make_defults_params('cvp-sanity')
-        params['TESTS_SETTINGS'] = 'drivetrain_version=proposed'
+        params['TESTS_SETTINGS'] = (
+            'drivetrain_version={0};ntp_skipped_nodes={1}'
+            .format(settings.MCP_VERSION, maas_minion_id))
         build = jenkins.run_build('cvp-sanity', params)
         LOG.info("Take a look test progress here - %s. Build #%s",
                  "http://172.16.44.33:8081/job/cvp-sanity/", build[1])